V8 Project
v8::internal::compiler::Scheduler Class Reference

#include <scheduler.h>

Classes

struct  SchedulerData
 

Static Public Member Functions

static Schedule * ComputeSchedule (Graph *graph)
 
static BasicBlockVector * ComputeSpecialRPO (Schedule *schedule)
 
static void ComputeCFG (Graph *graph, Schedule *schedule)
 

Private Types

enum  Placement { kUnknown , kSchedulable , kFixed }
 

Private Member Functions

 Scheduler (Zone *zone, Graph *graph, Schedule *schedule)
 
SchedulerData DefaultSchedulerData ()
 
SchedulerData * GetData (Node *node)
 
void BuildCFG ()
 
Placement GetPlacement (Node *node)
 
int GetRPONumber (BasicBlock *block)
 
void GenerateImmediateDominatorTree ()
 
BasicBlock * GetCommonDominator (BasicBlock *b1, BasicBlock *b2)
 
void ScheduleEarly ()
 
void PrepareUses ()
 
void ScheduleLate ()
 
bool ConnectFloatingControl ()
 
void ConnectFloatingControlSubgraph (BasicBlock *block, Node *node)
 

Private Attributes

Zone * zone_
 
Graph * graph_
 
Schedule * schedule_
 
NodeVectorVector scheduled_nodes_
 
NodeVector schedule_root_nodes_
 
ZoneVector< SchedulerData > node_data_
 
bool has_floating_control_
 

Friends

class CFGBuilder
 
class ScheduleEarlyNodeVisitor
 
class PrepareUsesVisitor
 
class ScheduleLateNodeVisitor
 

Detailed Description

Definition at line 20 of file scheduler.h.

Member Enumeration Documentation

◆ Placement

Enumerator
kUnknown 
kSchedulable 
kFixed 

Definition at line 34 of file scheduler.h.

Constructor & Destructor Documentation

◆ Scheduler()

v8::internal::compiler::Scheduler::Scheduler ( Zone *  zone,
Graph *  graph,
Schedule *  schedule 
)
private

Definition at line 227 of file scheduler.cc.

228  : zone_(zone),
229  graph_(graph),
230  schedule_(schedule),
231  scheduled_nodes_(zone),
232  schedule_root_nodes_(zone),
233  node_data_(graph_->NodeCount(), DefaultSchedulerData(), zone),
234  has_floating_control_(false) {}

Member Function Documentation

◆ BuildCFG()

void v8::internal::compiler::Scheduler::BuildCFG ( )
private

Definition at line 299 of file scheduler.cc.

299  {
300  Trace("---------------- CREATING CFG ------------------\n");
301  CFGBuilder cfg_builder(zone_, this);
302  cfg_builder.Run();
303  // Initialize per-block data.
304  scheduled_nodes_.resize(schedule_->BasicBlockCount(), NodeVector(zone_));
305 }

References v8::internal::compiler::Schedule::BasicBlockCount(), v8::internal::compiler::CFGBuilder::Run(), schedule_, scheduled_nodes_, v8::internal::compiler::Trace(), and zone_.

Referenced by ComputeSchedule().


◆ ComputeCFG()

static void v8::internal::compiler::Scheduler::ComputeCFG ( Graph *  graph,
Schedule *  schedule 
)
static

◆ ComputeSchedule()

Schedule * v8::internal::compiler::Scheduler::ComputeSchedule ( Graph *  graph)
static

Definition at line 237 of file scheduler.cc.

237  {
238  Schedule* schedule;
239  bool had_floating_control = false;
240  do {
241  Zone tmp_zone(graph->zone()->isolate());
242  schedule = new (graph->zone())
243  Schedule(graph->zone(), static_cast<size_t>(graph->NodeCount()));
244  Scheduler scheduler(&tmp_zone, graph, schedule);
245 
246  scheduler.BuildCFG();
247 
249  scheduler.GenerateImmediateDominatorTree();
250 
251  scheduler.PrepareUses();
252  scheduler.ScheduleEarly();
253  scheduler.ScheduleLate();
254 
255  had_floating_control = scheduler.ConnectFloatingControl();
256  } while (had_floating_control);
257 
258  return schedule;
259 }

References BuildCFG(), ComputeSpecialRPO(), ConnectFloatingControl(), GenerateImmediateDominatorTree(), v8::internal::Zone::isolate(), v8::internal::compiler::GenericGraphBase::NodeCount(), PrepareUses(), ScheduleEarly(), ScheduleLate(), and v8::internal::compiler::GenericGraphBase::zone().

Referenced by v8::internal::compiler::Pipeline::ComputeSchedule().
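For orientation, a minimal usage sketch of this entry point. Only Scheduler::ComputeSchedule() and Schedule::rpo_order() are taken from this page; the helper name ScheduleGraph and the include paths are assumptions for illustration, and the real caller is Pipeline::ComputeSchedule() as noted above.

// Hypothetical client code, not part of V8.
#include "src/compiler/graph.h"      // assumed include paths
#include "src/compiler/schedule.h"
#include "src/compiler/scheduler.h"

namespace v8 {
namespace internal {
namespace compiler {

Schedule* ScheduleGraph(Graph* graph) {
  // ComputeSchedule allocates the Schedule in the graph's zone and repeats
  // BuildCFG -> ComputeSpecialRPO -> dominator tree -> early/late scheduling
  // until ConnectFloatingControl() finds nothing left to connect.
  Schedule* schedule = Scheduler::ComputeSchedule(graph);
  // The basic blocks of the result are already in special RPO order; a
  // client can walk schedule->rpo_order() front to back.
  return schedule;
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8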


◆ ComputeSpecialRPO()

BasicBlockVector * v8::internal::compiler::Scheduler::ComputeSpecialRPO ( Schedule *  schedule)
static

Definition at line 933 of file scheduler.cc.

933  {
934  Zone tmp_zone(schedule->zone()->isolate());
935  Zone* zone = &tmp_zone;
936  Trace("------------- COMPUTING SPECIAL RPO ---------------\n");
937  // RPO should not have been computed for this schedule yet.
938  CHECK_EQ(kBlockUnvisited1, schedule->start()->rpo_number_);
939  CHECK_EQ(0, static_cast<int>(schedule->rpo_order_.size()));
940 
941  // Perform an iterative RPO traversal using an explicit stack,
942  // recording backedges that form cycles. O(|B|).
943  ZoneList<std::pair<BasicBlock*, int> > backedges(1, zone);
944  SpecialRPOStackFrame* stack =
945  zone->NewArray<SpecialRPOStackFrame>(schedule->BasicBlockCount());
946  BasicBlock* entry = schedule->start();
947  BlockList* order = NULL;
948  int stack_depth = Push(stack, 0, entry, kBlockUnvisited1);
949  int num_loops = 0;
950 
951  while (stack_depth > 0) {
952  int current = stack_depth - 1;
953  SpecialRPOStackFrame* frame = stack + current;
954 
955  if (frame->index < frame->block->SuccessorCount()) {
956  // Process the next successor.
957  BasicBlock* succ = frame->block->SuccessorAt(frame->index++);
958  if (succ->rpo_number_ == kBlockVisited1) continue;
959  if (succ->rpo_number_ == kBlockOnStack) {
960  // The successor is on the stack, so this is a backedge (cycle).
961  backedges.Add(
962  std::pair<BasicBlock*, int>(frame->block, frame->index - 1), zone);
963  if (succ->loop_end_ < 0) {
964  // Assign a new loop number to the header if it doesn't have one.
965  succ->loop_end_ = num_loops++;
966  }
967  } else {
968  // Push the successor onto the stack.
969  DCHECK(succ->rpo_number_ == kBlockUnvisited1);
970  stack_depth = Push(stack, stack_depth, succ, kBlockUnvisited1);
971  }
972  } else {
973  // Finished with all successors; pop the stack and add the block.
974  order = order->Add(zone, frame->block);
975  frame->block->rpo_number_ = kBlockVisited1;
976  stack_depth--;
977  }
978  }
979 
980  // If no loops were encountered, then the order we computed was correct.
981  LoopInfo* loops = NULL;
982  if (num_loops != 0) {
983  // Otherwise, compute the loop information from the backedges in order
984  // to perform a traversal that groups loop bodies together.
985  loops = ComputeLoopInfo(zone, stack, num_loops, schedule->BasicBlockCount(),
986  &backedges);
987 
988  // Initialize the "loop stack". Note the entry could be a loop header.
989  LoopInfo* loop = entry->IsLoopHeader() ? &loops[entry->loop_end_] : NULL;
990  order = NULL;
991 
992  // Perform an iterative post-order traversal, visiting loop bodies before
993  // edges that lead out of loops. Visits each block once, but linking loop
994  // sections together is linear in the loop size, so overall is
995  // O(|B| + max(loop_depth) * max(|loop|))
996  stack_depth = Push(stack, 0, entry, kBlockUnvisited2);
997  while (stack_depth > 0) {
998  SpecialRPOStackFrame* frame = stack + (stack_depth - 1);
999  BasicBlock* block = frame->block;
1000  BasicBlock* succ = NULL;
1001 
1002  if (frame->index < block->SuccessorCount()) {
1003  // Process the next normal successor.
1004  succ = block->SuccessorAt(frame->index++);
1005  } else if (block->IsLoopHeader()) {
1006  // Process additional outgoing edges from the loop header.
1007  if (block->rpo_number_ == kBlockOnStack) {
1008  // Finish the loop body the first time the header is left on the
1009  // stack.
1010  DCHECK(loop != NULL && loop->header == block);
1011  loop->start = order->Add(zone, block);
1012  order = loop->end;
1013  block->rpo_number_ = kBlockVisited2;
1014  // Pop the loop stack and continue visiting outgoing edges within the
1015  // the context of the outer loop, if any.
1016  loop = loop->prev;
1017  // We leave the loop header on the stack; the rest of this iteration
1018  // and later iterations will go through its outgoing edges list.
1019  }
1020 
1021  // Use the next outgoing edge if there are any.
1022  int outgoing_index = frame->index - block->SuccessorCount();
1023  LoopInfo* info = &loops[block->loop_end_];
1024  DCHECK(loop != info);
1025  if (info->outgoing != NULL &&
1026  outgoing_index < info->outgoing->length()) {
1027  succ = info->outgoing->at(outgoing_index);
1028  frame->index++;
1029  }
1030  }
1031 
1032  if (succ != NULL) {
1033  // Process the next successor.
1034  if (succ->rpo_number_ == kBlockOnStack) continue;
1035  if (succ->rpo_number_ == kBlockVisited2) continue;
1036  DCHECK(succ->rpo_number_ == kBlockUnvisited2);
1037  if (loop != NULL && !loop->members->Contains(succ->id())) {
1038  // The successor is not in the current loop or any nested loop.
1039  // Add it to the outgoing edges of this loop and visit it later.
1040  loop->AddOutgoing(zone, succ);
1041  } else {
1042  // Push the successor onto the stack.
1043  stack_depth = Push(stack, stack_depth, succ, kBlockUnvisited2);
1044  if (succ->IsLoopHeader()) {
1045  // Push the inner loop onto the loop stack.
1046  DCHECK(succ->loop_end_ >= 0 && succ->loop_end_ < num_loops);
1047  LoopInfo* next = &loops[succ->loop_end_];
1048  next->end = order;
1049  next->prev = loop;
1050  loop = next;
1051  }
1052  }
1053  } else {
1054  // Finished with all successors of the current block.
1055  if (block->IsLoopHeader()) {
1056  // If we are going to pop a loop header, then add its entire body.
1057  LoopInfo* info = &loops[block->loop_end_];
1058  for (BlockList* l = info->start; true; l = l->next) {
1059  if (l->next == info->end) {
1060  l->next = order;
1061  info->end = order;
1062  break;
1063  }
1064  }
1065  order = info->start;
1066  } else {
1067  // Pop a single node off the stack and add it to the order.
1068  order = order->Add(zone, block);
1069  block->rpo_number_ = kBlockVisited2;
1070  }
1071  stack_depth--;
1072  }
1073  }
1074  }
1075 
1076  // Construct the final order from the list.
1077  BasicBlockVector* final_order = &schedule->rpo_order_;
1078  order->Serialize(final_order);
1079 
1080  // Compute the correct loop header for every block and set the correct loop
1081  // ends.
1082  LoopInfo* current_loop = NULL;
1083  BasicBlock* current_header = NULL;
1084  int loop_depth = 0;
1085  for (BasicBlockVectorIter i = final_order->begin(); i != final_order->end();
1086  ++i) {
1087  BasicBlock* current = *i;
1088  current->loop_header_ = current_header;
1089  if (current->IsLoopHeader()) {
1090  loop_depth++;
1091  current_loop = &loops[current->loop_end_];
1092  BlockList* end = current_loop->end;
1093  current->loop_end_ = end == NULL ? static_cast<int>(final_order->size())
1094  : end->block->rpo_number_;
1095  current_header = current_loop->header;
1096  Trace("B%d is a loop header, increment loop depth to %d\n", current->id(),
1097  loop_depth);
1098  } else {
1099  while (current_header != NULL &&
1100  current->rpo_number_ >= current_header->loop_end_) {
1101  DCHECK(current_header->IsLoopHeader());
1102  DCHECK(current_loop != NULL);
1103  current_loop = current_loop->prev;
1104  current_header = current_loop == NULL ? NULL : current_loop->header;
1105  --loop_depth;
1106  }
1107  }
1108  current->loop_depth_ = loop_depth;
1109  if (current->loop_header_ == NULL) {
1110  Trace("B%d is not in a loop (depth == %d)\n", current->id(),
1111  current->loop_depth_);
1112  } else {
1113  Trace("B%d has loop header B%d, (depth == %d)\n", current->id(),
1114  current->loop_header_->id(), current->loop_depth_);
1115  }
1116  }
1117 
1118 #if DEBUG
1119  if (FLAG_trace_turbo_scheduler) PrintRPO(num_loops, loops, final_order);
1120  VerifySpecialRPO(num_loops, loops, final_order);
1121 #endif
1122  return final_order;
1123 }

References v8::internal::List< T, AllocationPolicy >::Add(), v8::internal::compiler::BlockList::Add(), v8::internal::compiler::LoopInfo::AddOutgoing(), v8::internal::List< T, AllocationPolicy >::at(), v8::internal::compiler::Schedule::BasicBlockCount(), v8::internal::compiler::SpecialRPOStackFrame::block, v8::internal::compiler::BlockList::block, CHECK_EQ, v8::internal::compiler::ComputeLoopInfo(), v8::internal::BitVector::Contains(), DCHECK, v8::internal::compiler::LoopInfo::end, v8::internal::compiler::LoopInfo::header, v8::internal::compiler::SpecialRPOStackFrame::index, v8::internal::Zone::isolate(), v8::internal::compiler::kBlockOnStack, v8::internal::compiler::kBlockUnvisited1, v8::internal::compiler::kBlockUnvisited2, v8::internal::compiler::kBlockVisited1, v8::internal::compiler::kBlockVisited2, v8::internal::compiler::LoopInfo::members, v8::internal::Zone::NewArray(), v8::internal::compiler::BlockList::next, NULL, v8::internal::compiler::LoopInfo::outgoing, v8::internal::compiler::LoopInfo::prev, v8::internal::compiler::Push(), v8::internal::compiler::Schedule::rpo_order_, v8::internal::compiler::BlockList::Serialize(), v8::internal::compiler::GenericGraph< V >::start(), v8::internal::compiler::LoopInfo::start, v8::internal::compiler::Trace(), and v8::internal::compiler::GenericGraphBase::zone().

Referenced by ComputeSchedule(), and v8::internal::compiler::RawMachineAssembler::Export().
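Only the second phase of the listing is V8-specific (it regroups each loop body so the loop is contiguous in the final order). The first phase is an ordinary iterative depth-first traversal with an explicit stack that emits blocks in reverse post-order and records backedges. A standalone sketch of that first phase over a plain adjacency-list graph, independent of V8's BasicBlock, Zone and BlockList types:

#include <cstdio>
#include <utility>
#include <vector>

// Sketch of phase one of ComputeSpecialRPO: iterative DFS with an explicit
// stack, emitting blocks in reverse post-order and recording backedges.
// "Block" here is just an adjacency list, not V8's BasicBlock.
struct Block {
  int id;
  std::vector<int> successors;
};

void ReversePostOrder(const std::vector<Block>& blocks, int entry,
                      std::vector<int>* order,
                      std::vector<std::pair<int, int> >* backedges) {
  enum State { kUnvisited, kOnStack, kVisited };
  std::vector<State> state(blocks.size(), kUnvisited);
  // Each stack frame remembers which successor to examine next, like
  // SpecialRPOStackFrame::index in the real code.
  std::vector<std::pair<int, size_t> > stack;
  stack.push_back(std::make_pair(entry, static_cast<size_t>(0)));
  state[entry] = kOnStack;

  while (!stack.empty()) {
    int block_id = stack.back().first;
    size_t succ_index = stack.back().second;
    const std::vector<int>& succs = blocks[block_id].successors;
    if (succ_index < succs.size()) {
      stack.back().second++;  // advance to the next successor
      int succ = succs[succ_index];
      if (state[succ] == kOnStack) {
        backedges->push_back(std::make_pair(block_id, succ));  // cycle found
      } else if (state[succ] == kUnvisited) {
        state[succ] = kOnStack;
        stack.push_back(std::make_pair(succ, static_cast<size_t>(0)));
      }  // already-visited successors are simply skipped
    } else {
      // All successors processed: prepending finished blocks yields RPO.
      state[block_id] = kVisited;
      order->insert(order->begin(), block_id);
      stack.pop_back();
    }
  }
}

int main() {
  // B0 -> B1 -> B2, B2 -> B1 (backedge), B2 -> B3.
  std::vector<Block> g = {{0, {1}}, {1, {2}}, {2, {1, 3}}, {3, {}}};
  std::vector<int> order;
  std::vector<std::pair<int, int> > backedges;
  ReversePostOrder(g, 0, &order, &backedges);
  for (int id : order) std::printf("B%d ", id);          // B0 B1 B2 B3
  std::printf("\n%zu backedge(s)\n", backedges.size());  // 1 backedge(s)
  return 0;
}

When backedges are found, the real implementation additionally computes loop membership (ComputeLoopInfo) and re-traverses so that every loop body is emitted as one contiguous range, which is what makes the order "special".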


◆ ConnectFloatingControl()

bool v8::internal::compiler::Scheduler::ConnectFloatingControl ( )
private

Definition at line 631 of file scheduler.cc.

631  {
632  if (!has_floating_control_) return false;
633 
634  Trace("Connecting floating control...\n");
635 
636  // Process blocks and instructions backwards to find and connect floating
637  // control nodes into the control graph according to the block they were
638  // scheduled into.
639  int max = static_cast<int>(schedule_->rpo_order()->size());
640  for (int i = max - 1; i >= 0; i--) {
641  BasicBlock* block = schedule_->rpo_order()->at(i);
642  // TODO(titzer): we place at most one floating control structure per
643  // basic block because scheduling currently can interleave phis from
644  // one subgraph with the merges from another subgraph.
645  bool one_placed = false;
646  for (int j = static_cast<int>(block->nodes_.size()) - 1; j >= 0; j--) {
647  Node* node = block->nodes_[j];
648  SchedulerData* data = GetData(node);
649  if (data->is_floating_control_ && !data->is_connected_control_ &&
650  !one_placed) {
651  Trace(" Floating control #%d:%s was scheduled in B%d\n", node->id(),
652  node->op()->mnemonic(), block->id());
653  ConnectFloatingControlSubgraph(block, node);
654  one_placed = true;
655  }
656  }
657  }
658 
659  return true;
660 }

References ConnectFloatingControlSubgraph(), GetData(), has_floating_control_, v8::internal::compiler::Scheduler::SchedulerData::is_connected_control_, v8::internal::compiler::Scheduler::SchedulerData::is_floating_control_, v8::internal::compiler::Schedule::rpo_order(), schedule_, and v8::internal::compiler::Trace().

Referenced by ComputeSchedule().


◆ ConnectFloatingControlSubgraph()

void v8::internal::compiler::Scheduler::ConnectFloatingControlSubgraph ( BasicBlock *  block,
Node *  node 
)
private

Definition at line 663 of file scheduler.cc.

663  {
664  Node* block_start = block->nodes_[0];
665  DCHECK(IrOpcode::IsControlOpcode(block_start->opcode()));
666  // Find the current "control successor" of the node that starts the block
667  // by searching the control uses for a control input edge from a connected
668  // control node.
669  Node* control_succ = NULL;
670  for (UseIter i = block_start->uses().begin(); i != block_start->uses().end();
671  ++i) {
672  Node::Edge edge = i.edge();
673  if (NodeProperties::IsControlEdge(edge) &&
674  GetData(edge.from())->is_connected_control_) {
675  DCHECK_EQ(NULL, control_succ);
676  control_succ = edge.from();
677  control_succ->ReplaceInput(edge.index(), end);
678  }
679  }
680  DCHECK_NE(NULL, control_succ);
681  Trace(" Inserting floating control end %d:%s between %d:%s -> %d:%s\n",
682  end->id(), end->op()->mnemonic(), control_succ->id(),
683  control_succ->op()->mnemonic(), block_start->id(),
684  block_start->op()->mnemonic());
685 
686  // Find the "start" node of the control subgraph, which should be the
687  // unique node that is itself floating control but has a control input that
688  // is not floating.
689  Node* start = NULL;
690  ZoneQueue<Node*> queue(zone_);
691  queue.push(end);
692  GetData(end)->is_connected_control_ = true;
693  while (!queue.empty()) {
694  Node* node = queue.front();
695  queue.pop();
696  Trace(" Search #%d:%s for control subgraph start\n", node->id(),
697  node->op()->mnemonic());
698  int max = NodeProperties::PastControlIndex(node);
699  for (int i = NodeProperties::FirstControlIndex(node); i < max; i++) {
700  Node* input = node->InputAt(i);
701  SchedulerData* data = GetData(input);
702  if (data->is_floating_control_) {
703  // {input} is floating control.
704  if (!data->is_connected_control_) {
705  // First time seeing {input} during this traversal, queue it.
706  queue.push(input);
707  data->is_connected_control_ = true;
708  }
709  } else {
710  // Otherwise, {node} is the start node, because it is floating control
711  // but is connected to {input} that is not floating control.
712  DCHECK_EQ(NULL, start); // There can be only one.
713  start = node;
714  }
715  }
716  }
717 
718  DCHECK_NE(NULL, start);
719  start->ReplaceInput(NodeProperties::FirstControlIndex(start), block_start);
720 
721  Trace(" Connecting floating control start %d:%s to %d:%s\n", start->id(),
722  start->op()->mnemonic(), block_start->id(),
723  block_start->op()->mnemonic());
724 }

References DCHECK, DCHECK_EQ, DCHECK_NE, v8::internal::compiler::NodeProperties::FirstControlIndex(), GetData(), v8::internal::compiler::Scheduler::SchedulerData::is_connected_control_, v8::internal::compiler::Scheduler::SchedulerData::is_floating_control_, v8::internal::compiler::NodeProperties::IsControlEdge(), v8::internal::compiler::IrOpcode::IsControlOpcode(), NULL, v8::internal::compiler::NodeProperties::PastControlIndex(), v8::internal::compiler::Trace(), and zone_.

Referenced by ConnectFloatingControl().


◆ DefaultSchedulerData()

Scheduler::SchedulerData v8::internal::compiler::Scheduler::DefaultSchedulerData ( )
private

Definition at line 221 of file scheduler.cc.

221  {
222  SchedulerData def = {0, 0, false, false, kUnknown};
223  return def;
224 }

References kUnknown.

◆ GenerateImmediateDominatorTree()

void v8::internal::compiler::Scheduler::GenerateImmediateDominatorTree ( )
private

Definition at line 323 of file scheduler.cc.

323  {
324  // Build the dominator graph. TODO(danno): consider using Lengauer & Tarjan's
325  // if this becomes really slow.
326  Trace("------------ IMMEDIATE BLOCK DOMINATORS -----------\n");
327  for (size_t i = 0; i < schedule_->rpo_order_.size(); i++) {
328  BasicBlock* current_rpo = schedule_->rpo_order_[i];
329  if (current_rpo != schedule_->start()) {
330  BasicBlock::Predecessors::iterator current_pred =
331  current_rpo->predecessors().begin();
332  BasicBlock::Predecessors::iterator end =
333  current_rpo->predecessors().end();
334  DCHECK(current_pred != end);
335  BasicBlock* dominator = *current_pred;
336  ++current_pred;
337  // For multiple predecessors, walk up the rpo ordering until a common
338  // dominator is found.
339  int current_rpo_pos = GetRPONumber(current_rpo);
340  while (current_pred != end) {
341  // Don't examine backwards edges
342  BasicBlock* pred = *current_pred;
343  if (GetRPONumber(pred) < current_rpo_pos) {
344  dominator = GetCommonDominator(dominator, *current_pred);
345  }
346  ++current_pred;
347  }
348  current_rpo->dominator_ = dominator;
349  Trace("Block %d's idom is %d\n", current_rpo->id(), dominator->id());
350  }
351  }
352 }

References DCHECK, GetCommonDominator(), GetRPONumber(), v8::internal::compiler::Schedule::rpo_order_, schedule_, v8::internal::compiler::GenericGraph< V >::start(), and v8::internal::compiler::Trace().

Referenced by ComputeSchedule().
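This is the classic single-pass dominator construction over an RPO ordering (compare Cooper, Harvey and Kennedy, "A Simple, Fast Dominance Algorithm"): each block's immediate dominator is obtained by intersecting its already-processed predecessors, and the intersection climbs the partially built tree exactly as GetCommonDominator() below does. A self-contained sketch over plain RPO numbers, where block 0 is the entry and the preds encoding is an assumption for illustration:

#include <cstdio>
#include <vector>

// Sketch of the single-pass idom construction above, over integer block ids
// that are already in RPO order. preds[i] lists the RPO numbers of block i's
// predecessors; predecessors with a higher RPO number (backward edges) are
// skipped, as in GenerateImmediateDominatorTree. Every non-entry block is
// assumed to have at least one predecessor, as the DCHECK above requires.
std::vector<int> BuildIdoms(const std::vector<std::vector<int> >& preds) {
  std::vector<int> idom(preds.size(), 0);  // the entry dominates itself
  // Mirrors Scheduler::GetCommonDominator: the block with the larger RPO
  // number climbs to its immediate dominator until both sides meet.
  auto intersect = [&](int b1, int b2) {
    while (b1 != b2) {
      if (b1 < b2) b2 = idom[b2]; else b1 = idom[b1];
    }
    return b1;
  };
  for (size_t i = 1; i < preds.size(); ++i) {
    int dominator = preds[i][0];
    for (size_t p = 1; p < preds[i].size(); ++p) {
      if (preds[i][p] < static_cast<int>(i)) {  // ignore backward edges
        dominator = intersect(dominator, preds[i][p]);
      }
    }
    idom[i] = dominator;
  }
  return idom;
}

int main() {
  // Diamond CFG: B0 -> {B1, B2}, B1 -> B3, B2 -> B3. Expect idom(B3) == B0.
  std::vector<std::vector<int> > preds = {{}, {0}, {0}, {1, 2}};
  std::vector<int> idom = BuildIdoms(preds);
  for (size_t i = 0; i < idom.size(); ++i)
    std::printf("idom(B%zu) = B%d\n", i, idom[i]);
  return 0;
}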


◆ GetCommonDominator()

BasicBlock * v8::internal::compiler::Scheduler::GetCommonDominator ( BasicBlock *  b1,
BasicBlock *  b2 
)
private

Definition at line 308 of file scheduler.cc.

308  {
309  while (b1 != b2) {
310  int b1_rpo = GetRPONumber(b1);
311  int b2_rpo = GetRPONumber(b2);
312  DCHECK(b1_rpo != b2_rpo);
313  if (b1_rpo < b2_rpo) {
314  b2 = b2->dominator_;
315  } else {
316  b1 = b1->dominator_;
317  }
318  }
319  return b1;
320 }

References DCHECK, and GetRPONumber().

Referenced by GenerateImmediateDominatorTree(), and v8::internal::compiler::ScheduleLateNodeVisitor::Pre().


◆ GetData()

SchedulerData* v8::internal::compiler::Scheduler::GetData ( Node *  node)
inlineprivate

◆ GetPlacement()

Scheduler::Placement v8::internal::compiler::Scheduler::GetPlacement ( Node *  node)
private

Definition at line 262 of file scheduler.cc.

262  {
263  SchedulerData* data = GetData(node);
264  if (data->placement_ == kUnknown) { // Compute placement, once, on demand.
265  switch (node->opcode()) {
266  case IrOpcode::kParameter:
267  // Parameters are always fixed to the start node.
268  data->placement_ = kFixed;
269  break;
270  case IrOpcode::kPhi:
271  case IrOpcode::kEffectPhi: {
272  // Phis and effect phis are fixed if their control inputs are.
273  data->placement_ = GetPlacement(NodeProperties::GetControlInput(node));
274  break;
275  }
276 #define DEFINE_FLOATING_CONTROL_CASE(V) case IrOpcode::k##V:
277  CONTROL_OP_LIST(DEFINE_FLOATING_CONTROL_CASE)
278 #undef DEFINE_FLOATING_CONTROL_CASE
279  {
280  // Control nodes that were not control-reachable from end may float.
281  data->placement_ = kSchedulable;
282  if (!data->is_connected_control_) {
283  data->is_floating_control_ = true;
284  has_floating_control_ = true;
285  Trace("Floating control found: #%d:%s\n", node->id(),
286  node->op()->mnemonic());
287  }
288  break;
289  }
290  default:
291  data->placement_ = kSchedulable;
292  break;
293  }
294  }
295  return data->placement_;
296 }

References CONTROL_OP_LIST, DEFINE_FLOATING_CONTROL_CASE, v8::internal::compiler::NodeProperties::GetControlInput(), GetData(), has_floating_control_, v8::internal::compiler::Scheduler::SchedulerData::is_connected_control_, v8::internal::compiler::Scheduler::SchedulerData::is_floating_control_, kFixed, kSchedulable, kUnknown, v8::internal::compiler::Scheduler::SchedulerData::placement_, and v8::internal::compiler::Trace().

Referenced by v8::internal::compiler::ScheduleLateNodeVisitor::GetBlockForUse(), v8::internal::compiler::ScheduleEarlyNodeVisitor::Post(), v8::internal::compiler::PrepareUsesVisitor::PostEdge(), v8::internal::compiler::ScheduleEarlyNodeVisitor::Pre(), and v8::internal::compiler::PrepareUsesVisitor::Pre().


◆ GetRPONumber()

int v8::internal::compiler::Scheduler::GetRPONumber ( BasicBlock *  block)
inlineprivate

Definition at line 68 of file scheduler.h.

68  {
69  DCHECK(block->rpo_number_ >= 0 &&
70  block->rpo_number_ < static_cast<int>(schedule_->rpo_order_.size()));
71  DCHECK(schedule_->rpo_order_[block->rpo_number_] == block);
72  return block->rpo_number_;
73  }

References DCHECK, v8::internal::compiler::Schedule::rpo_order_, and schedule_.

Referenced by GenerateImmediateDominatorTree(), and GetCommonDominator().


◆ PrepareUses()

void v8::internal::compiler::Scheduler::PrepareUses ( )
private

Definition at line 470 of file scheduler.cc.

470  {
471  Trace("------------------- PREPARE USES ------------------\n");
472  // Count the uses of every node, it will be used to ensure that all of a
473  // node's uses are scheduled before the node itself.
474  PrepareUsesVisitor prepare_uses(this);
475  graph_->VisitNodeInputsFromEnd(&prepare_uses);
476 }

References graph_, v8::internal::compiler::Trace(), and v8::internal::compiler::Graph::VisitNodeInputsFromEnd().

Referenced by ComputeSchedule().
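The visitor's job is bookkeeping: it records, for every node, how many uses that node has, so that the later passes can consider a node only after all of its uses have been handled. A minimal sketch of that counting step; the input-edge representation here is an assumption, not V8's node layout:

#include <cstdio>
#include <vector>

// Count how many uses each node has by walking every node's input edges
// once, the way PrepareUsesVisitor does during VisitNodeInputsFromEnd().
// inputs[n] lists the nodes that node n consumes.
std::vector<int> CountUses(const std::vector<std::vector<int> >& inputs) {
  std::vector<int> use_count(inputs.size(), 0);
  for (const std::vector<int>& node_inputs : inputs)
    for (int input : node_inputs) use_count[input]++;
  return use_count;
}

int main() {
  // n2 uses n0 and n1; n3 uses n2 twice.
  std::vector<std::vector<int> > inputs = {{}, {}, {0, 1}, {2, 2}};
  std::vector<int> uses = CountUses(inputs);
  for (size_t i = 0; i < uses.size(); ++i)
    std::printf("node %zu has %d use(s)\n", i, uses[i]);
  return 0;
}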


◆ ScheduleEarly()

void v8::internal::compiler::Scheduler::ScheduleEarly ( )
private

Definition at line 410 of file scheduler.cc.

410  {
411  Trace("------------------- SCHEDULE EARLY ----------------\n");
412 
413  int fixpoint_count = 0;
414  ScheduleEarlyNodeVisitor visitor(this);
415  while (visitor.has_changed_rpo_constraints_) {
416  visitor.has_changed_rpo_constraints_ = false;
417  graph_->VisitNodeInputsFromEnd(&visitor);
418  fixpoint_count++;
419  }
420 
421  Trace("It took %d iterations to determine fixpoint\n", fixpoint_count);
422 }

References graph_, v8::internal::compiler::ScheduleEarlyNodeVisitor::has_changed_rpo_constraints_, v8::internal::compiler::Trace(), and v8::internal::compiler::Graph::VisitNodeInputsFromEnd().

Referenced by ComputeSchedule().
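Conceptually, the early pass computes the earliest position every schedulable node may occupy: a node cannot be placed before any of its inputs, so its minimum RPO number is raised to the maximum of its inputs' minimum RPO numbers, iterated to a fixpoint just like the has_changed_rpo_constraints_ loop above. A simplified standalone model; the SchedNode layout and the fixed_rpo field are assumptions for illustration, standing in for nodes that are fixed to a block (parameters, phis, control):

#include <cstdio>
#include <vector>

// Fixpoint propagation of "schedule early" constraints over a small node
// graph. fixed_rpo >= 0 marks a node pinned to the block with that RPO
// number; all other nodes float and only carry a lower bound.
struct SchedNode {
  std::vector<int> inputs;  // indices of the nodes this node consumes
  int fixed_rpo;            // -1 if not fixed to a block
};

std::vector<int> ScheduleEarlyRPO(const std::vector<SchedNode>& nodes) {
  std::vector<int> min_rpo(nodes.size(), 0);
  for (size_t i = 0; i < nodes.size(); ++i)
    if (nodes[i].fixed_rpo >= 0) min_rpo[i] = nodes[i].fixed_rpo;

  bool changed = true;
  while (changed) {  // fixpoint, like has_changed_rpo_constraints_
    changed = false;
    for (size_t i = 0; i < nodes.size(); ++i) {
      if (nodes[i].fixed_rpo >= 0) continue;  // fixed nodes never move
      for (int input : nodes[i].inputs) {
        if (min_rpo[input] > min_rpo[i]) {
          min_rpo[i] = min_rpo[input];  // cannot be earlier than an input
          changed = true;
        }
      }
    }
  }
  return min_rpo;
}

int main() {
  // n0, n1 fixed in block 0; n2 = f(n0, n1) floats; n3 fixed in block 2;
  // n4 = f(n2, n3) floats and is pulled down to block 2.
  std::vector<SchedNode> nodes = {
      {{}, 0}, {{}, 0}, {{0, 1}, -1}, {{}, 2}, {{2, 3}, -1}};
  std::vector<int> rpo = ScheduleEarlyRPO(nodes);
  for (size_t i = 0; i < rpo.size(); ++i)
    std::printf("node %zu: earliest block RPO %d\n", i, rpo[i]);
  return 0;
}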


◆ ScheduleLate()

void v8::internal::compiler::Scheduler::ScheduleLate ( )
private

Definition at line 597 of file scheduler.cc.

597  {
598  Trace("------------------- SCHEDULE LATE -----------------\n");
599  if (FLAG_trace_turbo_scheduler) {
600  Trace("roots: ");
601  for (NodeVectorIter i = schedule_root_nodes_.begin();
602  i != schedule_root_nodes_.end(); ++i) {
603  Trace("#%d:%s ", (*i)->id(), (*i)->op()->mnemonic());
604  }
605  Trace("\n");
606  }
607 
608  // Schedule: Places nodes in dominator block of all their uses.
609  ScheduleLateNodeVisitor schedule_late_visitor(this);
610 
611  {
612  Zone zone(zone_->isolate());
613  GenericGraphVisit::Visit<ScheduleLateNodeVisitor,
614  NodeInputIterationTraits<Node> >(
615  graph_, &zone, schedule_root_nodes_.begin(), schedule_root_nodes_.end(),
616  &schedule_late_visitor);
617  }
618 
619  // Add collected nodes for basic blocks to their blocks in the right order.
620  int block_num = 0;
621  for (NodeVectorVectorIter i = scheduled_nodes_.begin();
622  i != scheduled_nodes_.end(); ++i) {
623  for (NodeVectorRIter j = i->rbegin(); j != i->rend(); ++j) {
624  schedule_->AddNode(schedule_->all_blocks_.at(block_num), *j);
625  }
626  block_num++;
627  }
628 }

References v8::internal::compiler::Schedule::AddNode(), v8::internal::compiler::Schedule::all_blocks_, graph_, v8::internal::Zone::isolate(), schedule_, schedule_root_nodes_, scheduled_nodes_, ScheduleLateNodeVisitor, v8::internal::compiler::Trace(), v8::internal::compiler::GenericGraphVisit::Visit(), and zone_.

Referenced by ComputeSchedule().
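The heart of the late pass is a dominator query: for each schedulable node, ScheduleLateNodeVisitor::Pre() (which calls GetCommonDominator(), see above) narrows the placement to the latest block that still dominates every use. A small sketch of that step over an idom array indexed by RPO number, mirroring GetCommonDominator()'s walk; the function names here are hypothetical:

#include <cstdio>
#include <vector>

// The latest legal block for a node is the common dominator of the blocks
// of all its uses. idom[] maps a block's RPO number to that of its
// immediate dominator (the entry maps to itself).
int CommonDominator(const std::vector<int>& idom, int b1, int b2) {
  while (b1 != b2) {
    if (b1 < b2) b2 = idom[b2]; else b1 = idom[b1];
  }
  return b1;
}

int LatestLegalBlock(const std::vector<int>& idom,
                     const std::vector<int>& use_blocks) {
  int block = use_blocks[0];
  for (size_t i = 1; i < use_blocks.size(); ++i)
    block = CommonDominator(idom, block, use_blocks[i]);
  return block;
}

int main() {
  // Diamond CFG again: idom(B1) = idom(B2) = idom(B3) = B0.
  std::vector<int> idom = {0, 0, 0, 0};
  // A node used in B1 and in B2 can be placed no later than B0.
  std::vector<int> use_blocks = {1, 2};
  std::printf("latest block: B%d\n", LatestLegalBlock(idom, use_blocks));
  return 0;
}

The nodes collected per block are then appended to their basic blocks in the loop at the end of the listing above.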


Friends And Related Function Documentation

◆ CFGBuilder

friend class CFGBuilder
friend

Definition at line 78 of file scheduler.h.

◆ PrepareUsesVisitor

friend class PrepareUsesVisitor
friend

Definition at line 83 of file scheduler.h.

◆ ScheduleEarlyNodeVisitor

friend class ScheduleEarlyNodeVisitor
friend

Definition at line 80 of file scheduler.h.

◆ ScheduleLateNodeVisitor

friend class ScheduleLateNodeVisitor
friend

Definition at line 86 of file scheduler.h.

Referenced by ScheduleLate().

Member Data Documentation

◆ graph_

Graph* v8::internal::compiler::Scheduler::graph_
private

◆ has_floating_control_

bool v8::internal::compiler::Scheduler::has_floating_control_
private

Definition at line 53 of file scheduler.h.

Referenced by ConnectFloatingControl(), and GetPlacement().

◆ node_data_

ZoneVector<SchedulerData> v8::internal::compiler::Scheduler::node_data_
private

Definition at line 52 of file scheduler.h.

Referenced by GetData().

◆ schedule_

Schedule* v8::internal::compiler::Scheduler::schedule_
private

◆ schedule_root_nodes_

NodeVector v8::internal::compiler::Scheduler::schedule_root_nodes_
private

◆ scheduled_nodes_

NodeVectorVector v8::internal::compiler::Scheduler::scheduled_nodes_
private

◆ zone_

Zone* v8::internal::compiler::Scheduler::zone_
private

Definition at line 47 of file scheduler.h.

Referenced by BuildCFG(), ConnectFloatingControlSubgraph(), and ScheduleLate().


The documentation for this class was generated from the following files:

scheduler.h
scheduler.cc