static bool IsDefUseChainLinkPresent(Node* def, Node* use) {
  Node::Uses uses = def->uses();
  for (Node::Uses::iterator it = uses.begin(); it != uses.end(); ++it) {
    if (*it == use) return true;
  }
  return false;
}

static bool IsUseDefChainLinkPresent(Node* def, Node* use) {
  Node::Inputs inputs = use->inputs();
  for (Node::Inputs::iterator it = inputs.begin(); it != inputs.end(); ++it) {
    if (*it == def) return true;
  }
  return false;
}
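
// The Verifier walks the graph with a visitor: Pre() runs once per node and
// checks input counts, frame state inputs, and use-def chain consistency.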
explicit Visitor(Zone* zone)
    : reached_from_start(NodeSet::key_compare(),
                         NodeSet::allocator_type(zone)) {}
GenericGraphVisit::Control Verifier::Visitor::Pre(Node* node) {
  // ...
  int frame_state_count =
      OperatorProperties::GetFrameStateInputCount(node->op());
  // ...
  // Verify the number of inputs matches up.
  int input_count = value_count + context_count + frame_state_count +
                    effect_count + control_count;
  CHECK_EQ(input_count, node->InputCount());
  // ...
  Node* frame_state = NodeProperties::GetFrameStateInput(node);
  CHECK(frame_state->opcode() == IrOpcode::kFrameState ||
        (node->opcode() == IrOpcode::kFrameState &&
         frame_state->opcode() == IrOpcode::kHeapConstant));
  // Verify all value inputs actually produce a value.
  for (int i = 0; i < value_count; ++i) {
    // ...
  }
  // Verify all context inputs are value nodes.
  for (int i = 0; i < context_count; ++i) {
    // ...
  }
  // Verify all effect inputs actually have an effect.
  for (int i = 0; i < effect_count; ++i) {
    // ...
  }
  // Verify all control inputs are control nodes.
  for (int i = 0; i < control_count; ++i) {
    // ...
  }
  // If the node produces multiple values, all value uses must be projections.
  Node::Uses uses = node->uses();
  for (Node::Uses::iterator it = uses.begin(); it != uses.end(); ++it) {
    CHECK(!NodeProperties::IsValueEdge(it.edge()) ||
          (*it)->opcode() == IrOpcode::kProjection ||
          (*it)->opcode() == IrOpcode::kParameter);
  }
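
  // Opcode-specific structural checks follow.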
  switch (node->opcode()) {
    case IrOpcode::kStart:
      CHECK_EQ(0, input_count);  // Start has no inputs.
      break;
    // ...
    case IrOpcode::kDead:
      // Dead is never connected to the graph.
      UNREACHABLE();
    case IrOpcode::kBranch: {
      // A branch has at most one IfTrue and one IfFalse use.
      Node::Uses uses = node->uses();
      bool got_true = false, got_false = false;
      for (Node::Uses::iterator it = uses.begin(); it != uses.end(); ++it) {
        CHECK(((*it)->opcode() == IrOpcode::kIfTrue && !got_true) ||
              ((*it)->opcode() == IrOpcode::kIfFalse && !got_false));
        if ((*it)->opcode() == IrOpcode::kIfTrue) got_true = true;
        if ((*it)->opcode() == IrOpcode::kIfFalse) got_false = true;
      }
      break;
    }
    case IrOpcode::kIfTrue:
    case IrOpcode::kIfFalse:
      CHECK_EQ(IrOpcode::kBranch,
               NodeProperties::GetControlInput(node, 0)->opcode());
      break;
    case IrOpcode::kLoop:
    case IrOpcode::kMerge:
      break;
    case IrOpcode::kReturn:
      break;
    case IrOpcode::kThrow:
      break;
    case IrOpcode::kParameter: {
      int index = OpParameter<int>(node);
      // ...
    }
    case IrOpcode::kInt32Constant:
    case IrOpcode::kInt64Constant:
    case IrOpcode::kFloat64Constant:
    case IrOpcode::kExternalConstant:
    case IrOpcode::kNumberConstant:
    case IrOpcode::kHeapConstant:
      // Constants have no inputs.
      CHECK_EQ(0, input_count);
      break;
    case IrOpcode::kPhi: {
      // ...
    }
    case IrOpcode::kEffectPhi: {
      // ...
    }
    case IrOpcode::kFrameState:
    case IrOpcode::kCall:
      break;
    case IrOpcode::kProjection: {
      size_t index = OpParameter<size_t>(node);
      Node* input = NodeProperties::GetValueInput(node, 0);
      CHECK_GT(OperatorProperties::GetValueOutputCount(input->op()),
               static_cast<int>(index));
    }
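
// Schedule verification: the helpers below check that the definition of each
// node dominates every one of its uses in the final schedule.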
static bool HasDominatingDef(Schedule* schedule, Node* node,
                             BasicBlock* container, BasicBlock* use_block,
                             int use_pos) {
  BasicBlock* block = use_block;
  while (true) {
    // Scan backwards within the block, then continue in its dominator.
    while (use_pos >= 0) {
      if (block->nodes_[use_pos] == node) return true;
      use_pos--;
    }
    block = block->dominator_;
    if (block == NULL) break;
    use_pos = static_cast<int>(block->nodes_.size()) - 1;
    if (node == block->control_input_) return true;
  }
  return false;
}
static void CheckInputsDominate(Schedule* schedule, BasicBlock* block,
                                Node* node, int use_pos) {
  for (int j = OperatorProperties::GetValueInputCount(node->op()) - 1; j >= 0;
       j--) {
    BasicBlock* use_block = block;
    if (node->opcode() == IrOpcode::kPhi) {
      // A phi's use of input j occurs at the end of predecessor block j.
      use_block = use_block->PredecessorAt(j);
      use_pos = static_cast<int>(use_block->nodes_.size()) - 1;
    }
    Node* input = node->InputAt(j);
    if (!HasDominatingDef(schedule, input, block, use_block, use_pos)) {
      V8_Fatal(__FILE__, __LINE__,
               "Node #%d:%s in B%d is not dominated by input@%d #%d:%s",
               node->id(), node->op()->mnemonic(), block->id(), j, input->id(),
               input->op()->mnemonic());
    }
  }
}
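
// ScheduleVerifier::Run re-checks the computed schedule: RPO consistency,
// the dominance relation, phi placement, and def-use dominance.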
void ScheduleVerifier::Run(Schedule* schedule) {
  const int count = schedule->BasicBlockCount();
  Zone tmp_zone(schedule->zone()->isolate());
  Zone* zone = &tmp_zone;
  BasicBlock* start = schedule->start();
  BasicBlockVector* rpo_order = schedule->rpo_order();
  // Verify the RPO order contains only blocks from this schedule.
  CHECK_GE(count, static_cast<int>(rpo_order->size()));
  for (BasicBlockVector::iterator b = rpo_order->begin(); b != rpo_order->end();
       ++b) {
    CHECK_EQ((*b), schedule->GetBlockById((*b)->id()));
  }
  // Verify RPO numbers of blocks.
  CHECK_EQ(start, rpo_order->at(0));  // The start block must be first.
  for (size_t b = 0; b < rpo_order->size(); b++) {
    BasicBlock* block = rpo_order->at(b);
    CHECK_EQ(static_cast<int>(b), block->rpo_number_);
    BasicBlock* dom = block->dominator_;
    if (b == 0) {
      CHECK_EQ(NULL, dom);  // The start block has no dominator.
    } else {
      // The immediate dominator must precede the block in the RPO order.
      CHECK_NE(NULL, dom);
      CHECK_LT(dom->rpo_number_, block->rpo_number_);
    }
  }
  // Verify that all blocks reachable from the start are in the RPO.
  BoolVector marked(count, false, BoolVector::allocator_type(zone));
  {
    // ... (a zone-allocated work queue, seeded with the start block)
    marked[start->id()] = true;
    while (!queue.empty()) {
      BasicBlock* block = queue.front();
      queue.pop();
      for (int s = 0; s < block->SuccessorCount(); s++) {
        BasicBlock* succ = block->SuccessorAt(s);
        if (!marked[succ->id()]) {
          marked[succ->id()] = true;
          queue.push(succ);
        }
      }
    }
  }
  // Verify marked blocks are in the RPO.
  for (int i = 0; i < count; i++) {
    BasicBlock* block = schedule->GetBlockById(i);
    if (marked[i]) {
      CHECK_GE(block->rpo_number_, 0);
      CHECK_EQ(block, rpo_order->at(block->rpo_number_));
    }
  }
  // Verify RPO blocks are marked.
  for (size_t b = 0; b < rpo_order->size(); b++) {
    CHECK(marked[rpo_order->at(b)->id()]);
  }
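
  // Verify the dominance relation by recomputing all dominator sets with a
  // forward fixpoint and comparing them against the dominator_ fields.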
  ZoneList<BitVector*> dominators(count, zone);
  dominators.Initialize(count, zone);
  dominators.AddBlock(NULL, count, zone);  // All entries start out NULL.
  // ... (the work queue is again seeded with the start block)
  dominators[start->id()] = new (zone) BitVector(count, zone);
  while (!queue.empty()) {
    BasicBlock* block = queue.front();
    queue.pop();
    BitVector* block_doms = dominators[block->id()];
    BasicBlock* idom = block->dominator_;
    if (idom != NULL && !block_doms->Contains(idom->id())) {
      V8_Fatal(__FILE__, __LINE__, "Block B%d is not dominated by B%d",
               block->id(), idom->id());
    }
    for (int s = 0; s < block->SuccessorCount(); s++) {
      BasicBlock* succ = block->SuccessorAt(s);
      BitVector* succ_doms = dominators[succ->id()];

      if (succ_doms == NULL) {
        // First time visiting the successor: S.doms = B + B.doms.
        succ_doms = new (zone) BitVector(count, zone);
        succ_doms->CopyFrom(*block_doms);
        succ_doms->Add(block->id());
        dominators[succ->id()] = succ_doms;
        queue.push(succ);
      } else {
        // Nth time visiting the successor: S.doms = S.doms ^ (B + B.doms).
        bool had = succ_doms->Contains(block->id());
        if (had) succ_doms->Remove(block->id());
        if (succ_doms->IntersectIsChanged(*block_doms)) queue.push(succ);
        if (had) succ_doms->Add(block->id());
      }
    }
  }
  // Verify the immediateness of dominators.
  for (BasicBlockVector::iterator b = rpo_order->begin();
       b != rpo_order->end(); ++b) {
    BasicBlock* block = *b;
    BasicBlock* idom = block->dominator_;
    if (idom == NULL) continue;
    BitVector* block_doms = dominators[block->id()];

    for (BitVector::Iterator it(block_doms); !it.Done(); it.Advance()) {
      BasicBlock* dom = schedule->GetBlockById(it.Current());
      if (dom != idom && !dominators[idom->id()]->Contains(dom->id())) {
        V8_Fatal(__FILE__, __LINE__,
                 "Block B%d is not immediately dominated by B%d", block->id(),
                 idom->id());
      }
    }
  }
  // Verify phis are placed in the block of their control input.
  for (BasicBlockVector::iterator b = rpo_order->begin(); b != rpo_order->end();
       ++b) {
    for (BasicBlock::const_iterator i = (*b)->begin(); i != (*b)->end(); ++i) {
      Node* phi = *i;
      if (phi->opcode() != IrOpcode::kPhi) continue;
      // Phis built by the RawMachineAssembler may lack a control input.
      if (phi->InputCount() >
          OperatorProperties::GetValueInputCount(phi->op())) {
        Node* control = NodeProperties::GetControlInput(phi);
        CHECK(control->opcode() == IrOpcode::kMerge ||
              control->opcode() == IrOpcode::kLoop);
        CHECK_EQ((*b), schedule->block(control));
      }
    }
  }
  // Verify that all uses are dominated by their definitions.
  for (BasicBlockVector::iterator b = rpo_order->begin(); b != rpo_order->end();
       ++b) {
    BasicBlock* block = *b;

    // Check the inputs to the block's control node.
    Node* control = block->control_input_;
    if (control != NULL) {
      CHECK_EQ(block, schedule->block(control));
      CheckInputsDominate(schedule, block, control,
                          static_cast<int>(block->nodes_.size()) - 1);
    }
    // Check the inputs of every node in the block.
    for (size_t i = 0; i < block->nodes_.size(); i++) {
      Node* node = block->nodes_[i];
      CheckInputsDominate(schedule, block, node, static_cast<int>(i) - 1);
    }
  }
}
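
// A usage sketch, not part of the original listing: one plausible way a
// compilation pipeline could invoke both verifiers. The wrapper function and
// its parameter names are hypothetical.
static void VerifyGraphAndSchedule(Graph* graph, Schedule* schedule) {
  Verifier::Run(graph);             // Per-node structural and use-def checks.
  ScheduleVerifier::Run(schedule);  // RPO, dominance, and placement checks.
}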