      register_beneficial_(true) {

void LiveRange::Verify() const {

bool LiveRange::HasOverlap(UseInterval* target) const {
  while (current_interval != NULL) {
    if (current_interval->Contains(target->start()) ||
        target->Contains(current_interval->start())) {
    current_interval = current_interval->next();

      assigned_register_(kInvalidAssignment),
      last_interval_(NULL),
      first_interval_(NULL),
      current_interval_(NULL),
      last_processed_use_(NULL),
      current_hint_operand_(NULL),
      spill_operand_(new (zone) LOperand()),

  DCHECK(!operand->IsUnallocated());
    use_pos = use_pos->next();
  if (use_pos == NULL) return true;
  DCHECK(!op->IsUnallocated());
  if (to_start_of == NULL) return;
  bool split_at_start = false;
  while (current != NULL) {
      current->SplitAt(position, zone);
    if (next->start().Value() >= position.Value()) {
      split_at_start = (next->start().Value() == position.Value());
  if (split_at_start) {
      use_before = use_after;
      use_after = use_after->next();
      use_before = use_after;
      use_after = use_after->next();
  if (use_before != NULL) {
  if (pos == NULL) return false;
  if (other_pos == NULL) return true;
  LAllocator::TraceAlloc("Shorten live range %d to [%d\n", id_, start.Value());
  LAllocator::TraceAlloc("Ensure live range %d in interval [%d %d[\n",
  LAllocator::TraceAlloc("Add to live range %d interval [%d %d[\n",
  LAllocator::TraceAlloc("Add to live range %d use position %d\n",
    prev_hint = current->HasHint() ? current : prev_hint;
    current = current->next();
    prev->next_ = use_pos;
  while (use_pos != NULL) {
    DCHECK(op->IsRegister() || op->IsDoubleRegister() ||
    use_pos = use_pos->next();
  if (!CanCover(position)) return false;
       interval = interval->next()) {
           interval->next()->start().Value() >= interval->start().Value());
    if (interval->Contains(position)) return true;
    if (interval->start().Value() > position.Value()) return false;
    if (cur_intersection.IsValid()) {
      return cur_intersection;

LAllocator::LAllocator(int num_values, HGraph* graph)
    : zone_(graph->isolate()),
      live_in_sets_(graph->blocks()->length(), zone()),
      live_ranges_(num_values * 2, zone()),
      fixed_live_ranges_(NULL),
      fixed_double_live_ranges_(NULL),
      unhandled_live_ranges_(num_values * 2, zone()),
      active_live_ranges_(8, zone()),
      inactive_live_ranges_(8, zone()),
      reusable_slots_(8, zone()),
      next_virtual_register_(num_values),
      first_artificial_register_(num_values),
      has_osr_entry_(false),
      allocation_ok_(true) {}

void LAllocator::InitializeLivenessAnalysis() {
  int block_count = graph_->blocks()->length();
  live_in_sets_.Initialize(block_count, zone());
  live_in_sets_.AddBlock(NULL, block_count, zone());
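// Liveness is computed backwards: a block's live-out set is the union of the
// live-in sets of its successors, plus the operands its successors' phis read
// along this edge (constant phi inputs are skipped below).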
BitVector* LAllocator::ComputeLiveOut(HBasicBlock* block) {
  BitVector* live_out =
      new(zone()) BitVector(next_virtual_register_, zone());
  for (HSuccessorIterator it(block->end()); !it.Done(); it.Advance()) {
    HBasicBlock* successor = it.Current();
    BitVector* live_in = live_in_sets_[successor->block_id()];
    if (live_in != NULL) live_out->Union(*live_in);

    int index = successor->PredecessorIndexOf(block);
    const ZoneList<HPhi*>* phis = successor->phis();
    for (int i = 0; i < phis->length(); ++i) {
      HPhi* phi = phis->at(i);
      if (!phi->OperandAt(index)->IsConstant()) {
        live_out->Add(phi->OperandAt(index)->id());

void LAllocator::AddInitialIntervals(HBasicBlock* block,
                                     BitVector* live_out) {
      block->first_instruction_index());
  BitVector::Iterator iterator(live_out);
  while (!iterator.Done()) {
    int operand_index = iterator.Current();
    LiveRange* range = LiveRangeFor(operand_index);
    range->AddUseInterval(start, end, zone());

int LAllocator::FixedDoubleLiveRangeID(int index) {

LOperand* LAllocator::AllocateFixed(LUnallocated* operand,
  TraceAlloc("Allocating fixed reg for op %d\n", operand->virtual_register());
  DCHECK(operand->HasFixedPolicy());
  if (operand->HasFixedSlotPolicy()) {
  } else if (operand->HasFixedRegisterPolicy()) {
    int reg_index = operand->fixed_register_index();
  } else if (operand->HasFixedDoubleRegisterPolicy()) {
    int reg_index = operand->fixed_register_index();
    TraceAlloc("Fixed reg is tagged at %d\n", pos);
    LInstruction* instr = InstructionAt(pos);
    if (instr->HasPointerMap()) {
      instr->pointer_map()->RecordPointer(operand, chunk()->zone());

LiveRange* LAllocator::FixedLiveRangeFor(int index) {
  LiveRange* result = fixed_live_ranges_[index];
  if (result == NULL) {
    result = new(zone()) LiveRange(FixedLiveRangeID(index), chunk()->zone());
    DCHECK(result->IsFixed());
    SetLiveRangeAssignedRegister(result, index);
    fixed_live_ranges_[index] = result;

LiveRange* LAllocator::FixedDoubleLiveRangeFor(int index) {
  LiveRange* result = fixed_double_live_ranges_[index];
  if (result == NULL) {
    result = new(zone()) LiveRange(FixedDoubleLiveRangeID(index),
    DCHECK(result->IsFixed());
    SetLiveRangeAssignedRegister(result, index);
    fixed_double_live_ranges_[index] = result;

LiveRange* LAllocator::LiveRangeFor(int index) {
  if (index >= live_ranges_.length()) {
    live_ranges_.AddBlock(NULL, index - live_ranges_.length() + 1, zone());
  LiveRange* result = live_ranges_[index];
  if (result == NULL) {
    result = new(zone()) LiveRange(index, chunk()->zone());
    live_ranges_[index] = result;

LGap* LAllocator::GetLastGap(HBasicBlock* block) {
  int last_instruction = block->last_instruction_index();
  int index = chunk_->NearestGapPos(last_instruction);

HPhi* LAllocator::LookupPhi(LOperand* operand) const {
  if (!operand->IsUnallocated()) return NULL;
  HValue* instr = graph_->LookupValue(index);
  if (instr != NULL && instr->IsPhi()) {
    return HPhi::cast(instr);

LiveRange* LAllocator::LiveRangeFor(LOperand* operand) {
  if (operand->IsUnallocated()) {
  } else if (operand->IsRegister()) {
    return FixedLiveRangeFor(operand->index());
  } else if (operand->IsDoubleRegister()) {
    return FixedDoubleLiveRangeFor(operand->index());

void LAllocator::Define(LifetimePosition position,
  LiveRange* range = LiveRangeFor(operand);
  if (range == NULL) return;
  if (range->IsEmpty() || range->Start().Value() > position.Value()) {
    range->AddUseInterval(position, position.NextInstruction(), zone());
    range->AddUsePosition(position.NextInstruction(), NULL, NULL, zone());
    range->ShortenTo(position);
  if (operand->IsUnallocated()) {
    range->AddUsePosition(position, unalloc_operand, hint, zone());

void LAllocator::Use(LifetimePosition block_start,
                     LifetimePosition position,
  LiveRange* range = LiveRangeFor(operand);
  if (range == NULL) return;
  if (operand->IsUnallocated()) {
    range->AddUsePosition(position, unalloc_operand, hint, zone());
  range->AddUseInterval(block_start, position, zone());

void LAllocator::AddConstraintsGapMove(int index,
  LGap* gap = GapAt(index);
  LParallelMove* move = gap->GetOrCreateParallelMove(LGap::START,
  if (from->IsUnallocated()) {
    const ZoneList<LMoveOperands>* move_operands = move->move_operands();
    for (int i = 0; i < move_operands->length(); ++i) {
      LMoveOperands cur = move_operands->at(i);
      LOperand* cur_to = cur.destination();
      if (cur_to->IsUnallocated()) {
        move->AddMove(cur.source(), to, chunk()->zone());
  move->AddMove(from, to, chunk()->zone());

void LAllocator::MeetRegisterConstraints(HBasicBlock* block) {
  int start = block->first_instruction_index();
  int end = block->last_instruction_index();
  if (start == -1) return;
  for (int i = start; i <= end; ++i) {
    LInstruction* instr = NULL;
    LInstruction* prev_instr = NULL;
    if (i < end) instr = InstructionAt(i + 1);
    if (i > start) prev_instr = InstructionAt(i - 1);
    MeetConstraintsBetween(prev_instr, instr, i);
    if (!AllocationOk()) return;
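// Fixed policies on temps, outputs, and inputs are resolved by pinning the
// operand and inserting parallel moves in the surrounding gaps; writable and
// same-as-input policies get a fresh virtual register plus a gap move.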
void LAllocator::MeetConstraintsBetween(LInstruction* first,
                                        LInstruction* second,
  for (TempIterator it(first); !it.Done(); it.Advance()) {
    if (temp->HasFixedPolicy()) {
      AllocateFixed(temp, gap_index - 1, false);

  if (first != NULL && first->Output() != NULL) {
    LiveRange* range = LiveRangeFor(first_output->virtual_register());
    bool assigned = false;
    if (first_output->HasFixedPolicy()) {
      LUnallocated* output_copy = first_output->CopyUnconstrained(
      bool is_tagged = HasTaggedValue(first_output->virtual_register());
      AllocateFixed(first_output, gap_index, is_tagged);
      if (first_output->IsStackSlot()) {
        range->SetSpillOperand(first_output);
        range->SetSpillStartIndex(gap_index - 1);
      chunk_->AddGapMove(gap_index, first_output, output_copy);
      range->SetSpillStartIndex(gap_index);
      LGap* gap = GapAt(gap_index);
      LParallelMove* move = gap->GetOrCreateParallelMove(LGap::BEFORE,
      move->AddMove(first_output, range->GetSpillOperand(),

  if (second != NULL) {
    for (UseIterator it(second); !it.Done(); it.Advance()) {
      if (cur_input->HasFixedPolicy()) {
        LUnallocated* input_copy = cur_input->CopyUnconstrained(
        bool is_tagged = HasTaggedValue(cur_input->virtual_register());
        AllocateFixed(cur_input, gap_index + 1, is_tagged);
        AddConstraintsGapMove(gap_index, input_copy, cur_input);
      } else if (cur_input->HasWritableRegisterPolicy()) {
        DCHECK(!cur_input->IsUsedAtStart());
        LUnallocated* input_copy = cur_input->CopyUnconstrained(
        int vreg = GetVirtualRegister();
        if (!AllocationOk()) return;
        cur_input->set_virtual_register(vreg);
        if (RequiredRegisterKind(input_copy->virtual_register()) ==
          double_artificial_registers_.Add(
              cur_input->virtual_register() - first_artificial_register_,
        AddConstraintsGapMove(gap_index, input_copy, cur_input);

  if (second != NULL && second->Output() != NULL) {
    if (second_output->HasSameAsInputPolicy()) {
      int output_vreg = second_output->virtual_register();
      int input_vreg = cur_input->virtual_register();
      LUnallocated* input_copy = cur_input->CopyUnconstrained(
      cur_input->set_virtual_register(second_output->virtual_register());
      AddConstraintsGapMove(gap_index, input_copy, cur_input);
      if (HasTaggedValue(input_vreg) && !HasTaggedValue(output_vreg)) {
        int index = gap_index + 1;
        LInstruction* instr = InstructionAt(index);
        if (instr->HasPointerMap()) {
          instr->pointer_map()->RecordPointer(input_copy, chunk()->zone());
      } else if (!HasTaggedValue(input_vreg) && HasTaggedValue(output_vreg)) {
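// Instructions are walked backwards within the block so uses are seen before
// their definitions: gap moves define their destinations (hinted by the
// source) and use their sources, while ordinary instructions add fixed
// clobber intervals for calls and then handle inputs and temps.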
void LAllocator::ProcessInstructions(HBasicBlock* block, BitVector* live) {
  int block_start = block->first_instruction_index();
  int index = block->last_instruction_index();
  LifetimePosition block_start_position =
  while (index >= block_start) {
    LifetimePosition curr_position =
    if (IsGapAt(index)) {
      LGap* gap = GapAt(index);
      LParallelMove* move = gap->GetOrCreateParallelMove(LGap::START,
      const ZoneList<LMoveOperands>* move_operands = move->move_operands();
      for (int i = 0; i < move_operands->length(); ++i) {
        LMoveOperands* cur = &move_operands->at(i);
        if (cur->IsIgnored()) continue;
        LOperand* from = cur->source();
        LOperand* to = cur->destination();
        HPhi* phi = LookupPhi(to);
          if (!phi->block()->IsLoopHeader()) {
            hint = LiveRangeFor(phi->id())->current_hint_operand();
          if (to->IsUnallocated()) {
          Define(curr_position, to, from);
          Define(curr_position, to, from);
        Use(block_start_position, curr_position, from, hint);
        if (from->IsUnallocated()) {
      LInstruction* instr = InstructionAt(index);
        LOperand* output = instr->Output();
        if (output != NULL) {
          if (output->IsUnallocated()) {
          Define(curr_position, output, NULL);
        if (instr->ClobbersRegisters()) {
            if (output == NULL || !output->IsRegister() ||
                output->index() != i) {
              LiveRange* range = FixedLiveRangeFor(i);
              range->AddUseInterval(curr_position,
                                    curr_position.InstructionEnd(),
        if (instr->ClobbersDoubleRegisters(isolate())) {
            if (output == NULL || !output->IsDoubleRegister() ||
                output->index() != i) {
              LiveRange* range = FixedDoubleLiveRangeFor(i);
              range->AddUseInterval(curr_position,
                                    curr_position.InstructionEnd(),
        for (UseIterator it(instr); !it.Done(); it.Advance()) {
          LOperand* input = it.Current();
          LifetimePosition use_pos;
          if (input->IsUnallocated() &&
            use_pos = curr_position;
            use_pos = curr_position.InstructionEnd();
          Use(block_start_position, use_pos, input, NULL);
          if (input->IsUnallocated()) {
        for (TempIterator it(instr); !it.Done(); it.Advance()) {
          LOperand* temp = it.Current();
          if (instr->ClobbersTemps()) {
            if (temp->IsRegister()) continue;
            if (temp->IsUnallocated()) {
              if (temp_unalloc->HasFixedPolicy()) {
          Use(block_start_position, curr_position.InstructionEnd(), temp, NULL);
          Define(curr_position, temp, NULL);
          if (temp->IsUnallocated()) {
            if (temp_unalloc->HasDoubleRegisterPolicy()) {
              double_artificial_registers_.Add(
                  temp_unalloc->virtual_register() - first_artificial_register_,

void LAllocator::ResolvePhis(HBasicBlock* block) {
  const ZoneList<HPhi*>* phis = block->phis();
  for (int i = 0; i < phis->length(); ++i) {
    HPhi* phi = phis->at(i);
    LUnallocated* phi_operand =
    phi_operand->set_virtual_register(phi->id());
    for (int j = 0; j < phi->OperandCount(); ++j) {
      HValue* op = phi->OperandAt(j);
      LOperand* operand = NULL;
      if (op->IsConstant() && op->EmitAtUses()) {
        HConstant* constant = HConstant::cast(op);
        operand = chunk_->DefineConstantOperand(constant);
        DCHECK(!op->EmitAtUses());
        LUnallocated* unalloc =
        unalloc->set_virtual_register(op->id());
      HBasicBlock* cur_block = block->predecessors()->at(j);
      chunk_->AddGapMove(cur_block->last_instruction_index() - 1,

      LInstruction* branch =
          InstructionAt(cur_block->last_instruction_index());
      if (branch->HasPointerMap()) {
        if (phi->representation().IsTagged() && !phi->type().IsSmi()) {
          branch->pointer_map()->RecordPointer(phi_operand, chunk()->zone());
        } else if (!phi->representation().IsDouble()) {
          branch->pointer_map()->RecordUntagged(phi_operand, chunk()->zone());

    LiveRange* live_range = LiveRangeFor(phi->id());
    LLabel* label = chunk_->GetLabel(phi->block()->block_id());
    label->GetOrCreateParallelMove(LGap::START, chunk()->zone())->
        AddMove(phi_operand, live_range->GetSpillOperand(), chunk()->zone());
    live_range->SetSpillStartIndex(phi->block()->first_instruction_index());
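// Top-level driver: constraint resolution, live-range construction, and
// register assignment run in a fixed order, and every phase is guarded by
// AllocationOk() so a failure aborts the remaining phases.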
bool LAllocator::Allocate(LChunk* chunk) {
  chunk_ = static_cast<LPlatformChunk*>(chunk);
  assigned_registers_ =
  assigned_double_registers_ =
  MeetRegisterConstraints();
  if (!AllocationOk()) return false;
  AllocateGeneralRegisters();
  if (!AllocationOk()) return false;
  AllocateDoubleRegisters();
  if (!AllocationOk()) return false;
  PopulatePointerMaps();
  ResolveControlFlow();

void LAllocator::MeetRegisterConstraints() {
  LAllocatorPhase phase("L_Register constraints", this);
  const ZoneList<HBasicBlock*>* blocks = graph_->blocks();
  for (int i = 0; i < blocks->length(); ++i) {
    HBasicBlock* block = blocks->at(i);
    MeetRegisterConstraints(block);
    if (!AllocationOk()) return;

void LAllocator::ResolvePhis() {
  LAllocatorPhase phase("L_Resolve phis", this);
  const ZoneList<HBasicBlock*>* blocks = graph_->blocks();
  for (int block_id = blocks->length() - 1; block_id >= 0; --block_id) {
    HBasicBlock* block = blocks->at(block_id);

void LAllocator::ResolveControlFlow(LiveRange* range,
                                    HBasicBlock* pred) {
  LifetimePosition pred_end =
  LifetimePosition cur_start =
  LiveRange* pred_cover = NULL;
  LiveRange* cur_cover = NULL;
  LiveRange* cur_range = range;
  while (cur_range != NULL && (cur_cover == NULL || pred_cover == NULL)) {
    if (cur_range->CanCover(cur_start)) {
      cur_cover = cur_range;
    if (cur_range->CanCover(pred_end)) {
      pred_cover = cur_range;
    cur_range = cur_range->next();

  if (cur_cover->IsSpilled()) return;
  if (pred_cover != cur_cover) {
    LOperand* pred_op = pred_cover->CreateAssignedOperand(chunk()->zone());
    LOperand* cur_op = cur_cover->CreateAssignedOperand(chunk()->zone());
    if (!pred_op->Equals(cur_op)) {
      if (block->predecessors()->length() == 1) {
        gap = GapAt(block->first_instruction_index());
        DCHECK(pred->end()->SecondSuccessor() == NULL);
        gap = GetLastGap(pred);

        LInstruction* branch = InstructionAt(pred->last_instruction_index());
        if (branch->HasPointerMap()) {
          if (HasTaggedValue(range->id())) {
            branch->pointer_map()->RecordPointer(cur_op, chunk()->zone());
          } else if (!cur_op->IsDoubleStackSlot() &&
                     !cur_op->IsDoubleRegister()) {
            branch->pointer_map()->RemovePointer(cur_op);
      gap->GetOrCreateParallelMove(
          LGap::START, chunk()->zone())->AddMove(pred_op, cur_op,

LParallelMove* LAllocator::GetConnectingParallelMove(LifetimePosition pos) {
  int index = pos.InstructionIndex();
  if (IsGapAt(index)) {
    LGap* gap = GapAt(index);
    return gap->GetOrCreateParallelMove(
  int gap_pos = pos.IsInstructionStart() ? (index - 1) : (index + 1);
  return GapAt(gap_pos)->GetOrCreateParallelMove(

HBasicBlock* LAllocator::GetBlock(LifetimePosition pos) {
  LGap* gap = GapAt(chunk_->NearestGapPos(pos.InstructionIndex()));
  return gap->block();
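// When a live range was split, the pieces may end up in different locations;
// ConnectRanges inserts a move at each split point that is not already
// handled by control-flow resolution.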
void LAllocator::ConnectRanges() {
  LAllocatorPhase phase("L_Connect ranges", this);
  for (int i = 0; i < live_ranges()->length(); ++i) {
    LiveRange* first_range = live_ranges()->at(i);
    if (first_range == NULL || first_range->parent() != NULL) continue;
    LiveRange* second_range = first_range->next();
    while (second_range != NULL) {
      LifetimePosition pos = second_range->Start();
      if (!second_range->IsSpilled()) {
        if (first_range->End().Value() == pos.Value()) {
          bool should_insert = true;
          if (IsBlockBoundary(pos)) {
            should_insert = CanEagerlyResolveControlFlow(GetBlock(pos));
          if (should_insert) {
            LParallelMove* move = GetConnectingParallelMove(pos);
            LOperand* prev_operand = first_range->CreateAssignedOperand(
            LOperand* cur_operand = second_range->CreateAssignedOperand(
            move->AddMove(prev_operand, cur_operand,
      first_range = second_range;
      second_range = second_range->next();

bool LAllocator::CanEagerlyResolveControlFlow(HBasicBlock* block) const {
  if (block->predecessors()->length() != 1) return false;
  return block->predecessors()->first()->block_id() == block->block_id() - 1;
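// Blocks that cannot be resolved eagerly get their live-in values reconciled
// edge by edge: for each value live on entry, the location at the end of the
// predecessor is connected to the location at the start of the block.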
void LAllocator::ResolveControlFlow() {
  LAllocatorPhase phase("L_Resolve control flow", this);
  const ZoneList<HBasicBlock*>* blocks = graph_->blocks();
  for (int block_id = 1; block_id < blocks->length(); ++block_id) {
    HBasicBlock* block = blocks->at(block_id);
    if (CanEagerlyResolveControlFlow(block)) continue;
    BitVector* live = live_in_sets_[block->block_id()];
    BitVector::Iterator iterator(live);
    while (!iterator.Done()) {
      int operand_index = iterator.Current();
      for (int i = 0; i < block->predecessors()->length(); ++i) {
        HBasicBlock* cur = block->predecessors()->at(i);
        LiveRange* cur_range = LiveRangeFor(operand_index);
        ResolveControlFlow(cur_range, block, cur);
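// Live ranges are built in a single backward pass over the blocks: start from
// each block's live-out set, walk its instructions, then handle phis and, for
// loop headers, extend every live-in value across the whole loop body.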
void LAllocator::BuildLiveRanges() {
  LAllocatorPhase phase("L_Build live ranges", this);
  InitializeLivenessAnalysis();
  const ZoneList<HBasicBlock*>* blocks = graph_->blocks();
  for (int block_id = blocks->length() - 1; block_id >= 0; --block_id) {
    HBasicBlock* block = blocks->at(block_id);
    BitVector* live = ComputeLiveOut(block);
    AddInitialIntervals(block, live);
    ProcessInstructions(block, live);

    const ZoneList<HPhi*>* phis = block->phis();
    for (int i = 0; i < phis->length(); ++i) {
      HPhi* phi = phis->at(i);
      live->Remove(phi->id());
      LOperand* hint = NULL;
      LOperand* phi_operand = NULL;
      LGap* gap = GetLastGap(phi->block()->predecessors()->at(0));
      LParallelMove* move = gap->GetOrCreateParallelMove(LGap::START,
      for (int j = 0; j < move->move_operands()->length(); ++j) {
        LOperand* to = move->move_operands()->at(j).destination();
        if (to->IsUnallocated() &&
          hint = move->move_operands()->at(j).source();
          block->first_instruction_index());
      Define(block_start, phi_operand, hint);

    live_in_sets_[block_id] = live;

    if (block->IsLoopHeader()) {
      HBasicBlock* back_edge = block->loop_information()->GetLastBackEdge();
      BitVector::Iterator iterator(live);
          block->first_instruction_index());
      while (!iterator.Done()) {
        int operand_index = iterator.Current();
        LiveRange* range = LiveRangeFor(operand_index);
        range->EnsureInterval(start, end, zone());
      for (int i = block->block_id() + 1; i <= back_edge->block_id(); ++i) {
        live_in_sets_[i]->Union(*live);

    if (block_id == 0) {
      BitVector::Iterator iterator(live);
      while (!iterator.Done()) {
        int operand_index = iterator.Current();
        if (chunk_->info()->IsStub()) {
          CodeStub::Major major_key = chunk_->info()->code_stub()->MajorKey();
          PrintF("Function: %s\n", CodeStub::MajorName(major_key, false));
          DCHECK(chunk_->info()->IsOptimizing());
                 chunk_->info()->function()->debug_name()->ToCString().get());
        PrintF("Value %d used before first definition!\n", operand_index);
        LiveRange* range = LiveRangeFor(operand_index);
        PrintF("First use is at %d\n", range->first_pos()->pos().Value());

  for (int i = 0; i < live_ranges_.length(); ++i) {
    if (live_ranges_[i] != NULL) {
      live_ranges_[i]->kind_ = RequiredRegisterKind(live_ranges_[i]->id());

bool LAllocator::SafePointsAreInOrder() const {
  const ZoneList<LPointerMap*>* pointer_maps = chunk_->pointer_maps();
  for (int i = 0; i < pointer_maps->length(); ++i) {
    LPointerMap* map = pointer_maps->at(i);
    if (safe_point > map->lithium_position()) return false;
    safe_point = map->lithium_position();
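// For every tagged live range, record a pointer in each safe point it covers:
// the spill slot if the value may live on the stack at that point, and the
// assigned operand of the covering child range if that child is not spilled.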
void LAllocator::PopulatePointerMaps() {
  LAllocatorPhase phase("L_Populate pointer maps", this);
  const ZoneList<LPointerMap*>* pointer_maps = chunk_->pointer_maps();
  DCHECK(SafePointsAreInOrder());

  int first_safe_point_index = 0;
  int last_range_start = 0;
  for (int range_idx = 0; range_idx < live_ranges()->length(); ++range_idx) {
    LiveRange* range = live_ranges()->at(range_idx);
    if (range == NULL) continue;
    if (range->parent() != NULL) continue;
    if (!HasTaggedValue(range->id())) continue;
    if (range->IsEmpty()) continue;

    int start = range->Start().InstructionIndex();
    for (LiveRange* cur = range; cur != NULL; cur = cur->next()) {
      LifetimePosition this_end = cur->End();
      if (this_end.InstructionIndex() > end) end = this_end.InstructionIndex();
      DCHECK(cur->Start().InstructionIndex() >= start);

    if (start < last_range_start) {
      first_safe_point_index = 0;
    last_range_start = start;

    while (first_safe_point_index < pointer_maps->length()) {
      LPointerMap* map = pointer_maps->at(first_safe_point_index);
      int safe_point = map->lithium_position();
      if (safe_point >= start) break;
      first_safe_point_index++;

    for (int safe_point_index = first_safe_point_index;
         safe_point_index < pointer_maps->length();
         ++safe_point_index) {
      LPointerMap* map = pointer_maps->at(safe_point_index);
      int safe_point = map->lithium_position();
      if (safe_point - 1 > end) break;

      LifetimePosition safe_point_pos =
      LiveRange* cur = range;
      while (cur != NULL && !cur->Covers(safe_point_pos)) {
      if (cur == NULL) continue;

      if (range->HasAllocatedSpillOperand() &&
          safe_point >= range->spill_start_index()) {
        TraceAlloc("Pointer for range %d (spilled at %d) at safe point %d\n",
                   range->id(), range->spill_start_index(), safe_point);
        map->RecordPointer(range->GetSpillOperand(), chunk()->zone());

      if (!cur->IsSpilled()) {
        TraceAlloc("Pointer in register for range %d (start at %d) "
                   "at safe point %d\n",
                   cur->id(), cur->Start().Value(), safe_point);
        LOperand* operand = cur->CreateAssignedOperand(chunk()->zone());
        DCHECK(!operand->IsStackSlot());
        map->RecordPointer(operand, chunk()->zone());

void LAllocator::AllocateGeneralRegisters() {
  LAllocatorPhase phase("L_Allocate general registers", this);
  AllocateRegisters();

void LAllocator::AllocateDoubleRegisters() {
  LAllocatorPhase phase("L_Allocate double registers", this);
  AllocateRegisters();
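// Main linear-scan loop: unhandled ranges are processed in order of their
// start position; fixed ranges start out inactive, and at each step the
// active/inactive sets are updated before a register is chosen via
// TryAllocateFreeReg or, failing that, AllocateBlockedReg.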
void LAllocator::AllocateRegisters() {
  DCHECK(unhandled_live_ranges_.is_empty());

  for (int i = 0; i < live_ranges_.length(); ++i) {
    if (live_ranges_[i] != NULL) {
      if (live_ranges_[i]->Kind() == mode_) {
        AddToUnhandledUnsorted(live_ranges_[i]);
  DCHECK(UnhandledIsSorted());

  DCHECK(reusable_slots_.is_empty());
  DCHECK(active_live_ranges_.is_empty());
  DCHECK(inactive_live_ranges_.is_empty());

    LiveRange* current = fixed_double_live_ranges_.at(i);
    if (current != NULL) {
      AddToInactive(current);
  for (int i = 0; i < fixed_live_ranges_.length(); ++i) {
    LiveRange* current = fixed_live_ranges_.at(i);
    if (current != NULL) {
      AddToInactive(current);

  while (!unhandled_live_ranges_.is_empty()) {
    DCHECK(UnhandledIsSorted());
    LiveRange* current = unhandled_live_ranges_.RemoveLast();
    DCHECK(UnhandledIsSorted());
    LifetimePosition position = current->Start();
    allocation_finger_ = position;
    TraceAlloc("Processing interval %d start=%d\n",

    if (current->HasAllocatedSpillOperand()) {
      TraceAlloc("Live range %d already has a spill operand\n", current->id());
      LifetimePosition next_pos = position;
      if (IsGapAt(next_pos.InstructionIndex())) {
        next_pos = next_pos.NextInstruction();
      UsePosition* pos = current->NextUsePositionRegisterIsBeneficial(next_pos);
      } else if (pos->pos().Value() >
                 current->Start().NextInstruction().Value()) {
        SpillBetween(current, current->Start(), pos->pos());
        if (!AllocationOk()) return;
        DCHECK(UnhandledIsSorted());

    for (int i = 0; i < active_live_ranges_.length(); ++i) {
      LiveRange* cur_active = active_live_ranges_.at(i);
      if (cur_active->End().Value() <= position.Value()) {
        ActiveToHandled(cur_active);
      } else if (!cur_active->Covers(position)) {
        ActiveToInactive(cur_active);

    for (int i = 0; i < inactive_live_ranges_.length(); ++i) {
      LiveRange* cur_inactive = inactive_live_ranges_.at(i);
      if (cur_inactive->End().Value() <= position.Value()) {
        InactiveToHandled(cur_inactive);
      } else if (cur_inactive->Covers(position)) {
        InactiveToActive(cur_inactive);

    DCHECK(!current->HasRegisterAssigned() && !current->IsSpilled());

    bool result = TryAllocateFreeReg(current);
    if (!AllocationOk()) return;
    if (!result) AllocateBlockedReg(current);
    if (!AllocationOk()) return;
    if (current->HasRegisterAssigned()) {
      AddToActive(current);

  reusable_slots_.Rewind(0);
  active_live_ranges_.Rewind(0);
  inactive_live_ranges_.Rewind(0);
const char* LAllocator::RegisterName(int allocation_index) {

void LAllocator::TraceAlloc(const char* msg, ...) {
  if (FLAG_trace_alloc) {
    va_start(arguments, msg);

bool LAllocator::HasTaggedValue(int virtual_register) const {
  HValue* value = graph_->LookupValue(virtual_register);
  if (value == NULL) return false;
  return value->representation().IsTagged() && !value->type().IsSmi();

RegisterKind LAllocator::RequiredRegisterKind(int virtual_register) const {
  if (virtual_register < first_artificial_register_) {
    HValue* value = graph_->LookupValue(virtual_register);
    if (value != NULL && value->representation().IsDouble()) {
  } else if (double_artificial_registers_.Contains(
      virtual_register - first_artificial_register_)) {

void LAllocator::AddToActive(LiveRange* range) {
  TraceAlloc("Add live range %d to active\n", range->id());
  active_live_ranges_.Add(range, zone());

void LAllocator::AddToInactive(LiveRange* range) {
  TraceAlloc("Add live range %d to inactive\n", range->id());
  inactive_live_ranges_.Add(range, zone());

void LAllocator::AddToUnhandledSorted(LiveRange* range) {
  if (range == NULL || range->IsEmpty()) return;
  DCHECK(!range->HasRegisterAssigned() && !range->IsSpilled());
  DCHECK(allocation_finger_.Value() <= range->Start().Value());
  for (int i = unhandled_live_ranges_.length() - 1; i >= 0; --i) {
    LiveRange* cur_range = unhandled_live_ranges_.at(i);
    if (range->ShouldBeAllocatedBefore(cur_range)) {
      TraceAlloc("Add live range %d to unhandled at %d\n", range->id(), i + 1);
      unhandled_live_ranges_.InsertAt(i + 1, range, zone());
      DCHECK(UnhandledIsSorted());
  TraceAlloc("Add live range %d to unhandled at start\n", range->id());
  unhandled_live_ranges_.InsertAt(0, range, zone());
  DCHECK(UnhandledIsSorted());

void LAllocator::AddToUnhandledUnsorted(LiveRange* range) {
  if (range == NULL || range->IsEmpty()) return;
  DCHECK(!range->HasRegisterAssigned() && !range->IsSpilled());
  TraceAlloc("Add live range %d to unhandled unsorted at end\n", range->id());
  unhandled_live_ranges_.Add(range, zone());

  DCHECK(!(*a)->ShouldBeAllocatedBefore(*b) ||
         !(*b)->ShouldBeAllocatedBefore(*a));
  if ((*a)->ShouldBeAllocatedBefore(*b)) return 1;
  if ((*b)->ShouldBeAllocatedBefore(*a)) return -1;
  return (*a)->id() - (*b)->id();

void LAllocator::SortUnhandled() {
  TraceAlloc("Sort unhandled\n");

bool LAllocator::UnhandledIsSorted() {
  int len = unhandled_live_ranges_.length();
  for (int i = 1; i < len; i++) {
    LiveRange* a = unhandled_live_ranges_.at(i - 1);
    LiveRange* b = unhandled_live_ranges_.at(i);
    if (a->Start().Value() < b->Start().Value()) return false;

void LAllocator::FreeSpillSlot(LiveRange* range) {
  if (range->next() != NULL) return;
  if (!range->TopLevel()->HasAllocatedSpillOperand()) return;
  int index = range->TopLevel()->GetSpillOperand()->index();
    reusable_slots_.Add(range, zone());

LOperand* LAllocator::TryReuseSpillSlot(LiveRange* range) {
  if (reusable_slots_.is_empty()) return NULL;
  if (reusable_slots_.first()->End().Value() >
      range->TopLevel()->Start().Value()) {
  LOperand* result = reusable_slots_.first()->TopLevel()->GetSpillOperand();
  reusable_slots_.Remove(0);

void LAllocator::ActiveToHandled(LiveRange* range) {
  DCHECK(active_live_ranges_.Contains(range));
  active_live_ranges_.RemoveElement(range);
  TraceAlloc("Moving live range %d from active to handled\n", range->id());
  FreeSpillSlot(range);

void LAllocator::ActiveToInactive(LiveRange* range) {
  DCHECK(active_live_ranges_.Contains(range));
  active_live_ranges_.RemoveElement(range);
  inactive_live_ranges_.Add(range, zone());
  TraceAlloc("Moving live range %d from active to inactive\n", range->id());

void LAllocator::InactiveToHandled(LiveRange* range) {
  DCHECK(inactive_live_ranges_.Contains(range));
  inactive_live_ranges_.RemoveElement(range);
  TraceAlloc("Moving live range %d from inactive to handled\n", range->id());
  FreeSpillSlot(range);

void LAllocator::InactiveToActive(LiveRange* range) {
  DCHECK(inactive_live_ranges_.Contains(range));
  inactive_live_ranges_.RemoveElement(range);
  active_live_ranges_.Add(range, zone());
  TraceAlloc("Moving live range %d from inactive to active\n", range->id());
bool LAllocator::TryAllocateFreeReg(LiveRange* current) {
  for (int i = 0; i < num_registers_; i++) {

  for (int i = 0; i < active_live_ranges_.length(); ++i) {
    LiveRange* cur_active = active_live_ranges_.at(i);
    free_until_pos[cur_active->assigned_register()] =

  for (int i = 0; i < inactive_live_ranges_.length(); ++i) {
    LiveRange* cur_inactive = inactive_live_ranges_.at(i);
    LifetimePosition next_intersection =
        cur_inactive->FirstIntersection(current);
    if (!next_intersection.IsValid()) continue;
    int cur_reg = cur_inactive->assigned_register();
    free_until_pos[cur_reg] = Min(free_until_pos[cur_reg], next_intersection);

  if (hint != NULL && (hint->IsRegister() || hint->IsDoubleRegister())) {
    int register_index = hint->index();
        "Found reg hint %s (free until [%d) for live range %d (end %d[).\n",
        RegisterName(register_index),
        free_until_pos[register_index].Value(),

    if (free_until_pos[register_index].Value() >= current->End().Value()) {
      TraceAlloc("Assigning preferred reg %s to live range %d\n",
                 RegisterName(register_index),
      SetLiveRangeAssignedRegister(current, register_index);

  for (int i = 1; i < RegisterCount(); ++i) {
    if (free_until_pos[i].Value() > free_until_pos[reg].Value()) {

  LifetimePosition pos = free_until_pos[reg];
  if (pos.Value() <= current->Start().Value()) {
  if (pos.Value() < current->End().Value()) {
    LiveRange* tail = SplitRangeAt(current, pos);
    if (!AllocationOk()) return false;
    AddToUnhandledSorted(tail);

  TraceAlloc("Assigning free reg %s to live range %d\n",
  SetLiveRangeAssignedRegister(current, reg);
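// No register is free for the whole range: pick the register whose next use
// by a competing range is farthest away. If even that use comes before the
// current range's first register use, spill the current range instead;
// otherwise evict the competitors via SplitAndSpillIntersecting.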
void LAllocator::AllocateBlockedReg(LiveRange* current) {
  UsePosition* register_use = current->NextRegisterPosition(current->Start());
  if (register_use == NULL) {

  for (int i = 0; i < num_registers_; i++) {

  for (int i = 0; i < active_live_ranges_.length(); ++i) {
    LiveRange* range = active_live_ranges_[i];
    int cur_reg = range->assigned_register();
    if (range->IsFixed() || !range->CanBeSpilled(current->Start())) {
      block_pos[cur_reg] = use_pos[cur_reg] =
      UsePosition* next_use = range->NextUsePositionRegisterIsBeneficial(
      if (next_use == NULL) {
        use_pos[cur_reg] = range->End();
        use_pos[cur_reg] = next_use->pos();

  for (int i = 0; i < inactive_live_ranges_.length(); ++i) {
    LiveRange* range = inactive_live_ranges_.at(i);
    DCHECK(range->End().Value() > current->Start().Value());
    LifetimePosition next_intersection = range->FirstIntersection(current);
    if (!next_intersection.IsValid()) continue;
    int cur_reg = range->assigned_register();
    if (range->IsFixed()) {
      block_pos[cur_reg] = Min(block_pos[cur_reg], next_intersection);
      use_pos[cur_reg] = Min(block_pos[cur_reg], use_pos[cur_reg]);
      use_pos[cur_reg] = Min(use_pos[cur_reg], next_intersection);

  for (int i = 1; i < RegisterCount(); ++i) {
    if (use_pos[i].Value() > use_pos[reg].Value()) {

  LifetimePosition pos = use_pos[reg];
  if (pos.Value() < register_use->pos().Value()) {
    SpillBetween(current, current->Start(), register_use->pos());

  if (block_pos[reg].Value() < current->End().Value()) {
    LiveRange* tail = SplitBetween(current,
                                   block_pos[reg].InstructionStart());
    if (!AllocationOk()) return;
    AddToUnhandledSorted(tail);

  DCHECK(block_pos[reg].Value() >= current->End().Value());
  TraceAlloc("Assigning blocked reg %s to live range %d\n",
  SetLiveRangeAssignedRegister(current, reg);
  SplitAndSpillIntersecting(current);
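// Spilling inside a loop is expensive, so the spill position is hoisted
// towards the header of the outermost loop that the range covers, provided
// the range has no register-beneficial use earlier than that loop start.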
LifetimePosition LAllocator::FindOptimalSpillingPos(LiveRange* range,
                                                    LifetimePosition pos) {
  HBasicBlock* block = GetBlock(pos.InstructionStart());
  HBasicBlock* loop_header =
      block->IsLoopHeader() ? block : block->parent_loop_header();

  if (loop_header == NULL) return pos;

  UsePosition* prev_use =
      range->PreviousUsePositionRegisterIsBeneficial(pos);

  while (loop_header != NULL) {
        loop_header->first_instruction_index());
    if (range->Covers(loop_start)) {
      if (prev_use == NULL || prev_use->pos().Value() < loop_start.Value()) {
    loop_header = loop_header->parent_loop_header();
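// Evict every active or inactive range that currently holds the chosen
// register and overlaps the new assignment: split it and spill the
// conflicting part, keeping the piece up to its next use that actually
// requires a register.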
void LAllocator::SplitAndSpillIntersecting(LiveRange* current) {
  DCHECK(current->HasRegisterAssigned());
  int reg = current->assigned_register();
  LifetimePosition split_pos = current->Start();
  for (int i = 0; i < active_live_ranges_.length(); ++i) {
    LiveRange* range = active_live_ranges_[i];
    if (range->assigned_register() == reg) {
      UsePosition* next_pos = range->NextRegisterPosition(current->Start());
      LifetimePosition spill_pos = FindOptimalSpillingPos(range, split_pos);
      if (next_pos == NULL) {
        SpillAfter(range, spill_pos);
        SpillBetweenUntil(range, spill_pos, current->Start(), next_pos->pos());
      if (!AllocationOk()) return;
      ActiveToHandled(range);

  for (int i = 0; i < inactive_live_ranges_.length(); ++i) {
    LiveRange* range = inactive_live_ranges_[i];
    DCHECK(range->End().Value() > current->Start().Value());
    if (range->assigned_register() == reg && !range->IsFixed()) {
      LifetimePosition next_intersection = range->FirstIntersection(current);
      if (next_intersection.IsValid()) {
        UsePosition* next_pos = range->NextRegisterPosition(current->Start());
        if (next_pos == NULL) {
          SpillAfter(range, split_pos);
          next_intersection = Min(next_intersection, next_pos->pos());
          SpillBetween(range, split_pos, next_intersection);
        if (!AllocationOk()) return;
        InactiveToHandled(range);

bool LAllocator::IsBlockBoundary(LifetimePosition pos) {
  return pos.IsInstructionStart() &&
         InstructionAt(pos.InstructionIndex())->IsLabel();

LiveRange* LAllocator::SplitRangeAt(LiveRange* range, LifetimePosition pos) {
  DCHECK(!range->IsFixed());
  TraceAlloc("Splitting live range %d at %d\n", range->id(), pos.Value());

  if (pos.Value() <= range->Start().Value()) return range;

  DCHECK(pos.IsInstructionStart() ||
         !chunk_->instructions()->at(pos.InstructionIndex())->IsControl());

  int vreg = GetVirtualRegister();
  if (!AllocationOk()) return NULL;
  LiveRange* result = LiveRangeFor(vreg);
  range->SplitAt(pos, result, zone());

LiveRange* LAllocator::SplitBetween(LiveRange* range,
                                    LifetimePosition start,
                                    LifetimePosition end) {
  DCHECK(!range->IsFixed());
  TraceAlloc("Splitting live range %d in position between [%d, %d]\n",

  LifetimePosition split_pos = FindOptimalSplitPos(start, end);
  DCHECK(split_pos.Value() >= start.Value());
  return SplitRangeAt(range, split_pos);
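// A split inside a loop forces the reload to run on every iteration, so the
// split position is moved up to the outermost loop boundary that still lies
// within [start, end].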
LifetimePosition LAllocator::FindOptimalSplitPos(LifetimePosition start,
                                                 LifetimePosition end) {
  int start_instr = start.InstructionIndex();
  int end_instr = end.InstructionIndex();
  DCHECK(start_instr <= end_instr);

  if (start_instr == end_instr) return end;

  HBasicBlock* start_block = GetBlock(start);
  HBasicBlock* end_block = GetBlock(end);

  if (end_block == start_block) {

  HBasicBlock* block = end_block;
  while (block->parent_loop_header() != NULL &&
         block->parent_loop_header()->block_id() > start_block->block_id()) {
    block = block->parent_loop_header();

  if (block == end_block && !end_block->IsLoopHeader()) return end;

      block->first_instruction_index());

void LAllocator::SpillAfter(LiveRange* range, LifetimePosition pos) {
  LiveRange* second_part = SplitRangeAt(range, pos);
  if (!AllocationOk()) return;

void LAllocator::SpillBetween(LiveRange* range,
                              LifetimePosition start,
                              LifetimePosition end) {
  SpillBetweenUntil(range, start, start, end);
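// Split off the piece of the range starting at `start`; if that piece still
// reaches `end`, split it again so only the middle part [start, end[ is
// spilled and the tail after `end` goes back on the unhandled list.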
void LAllocator::SpillBetweenUntil(LiveRange* range,
                                   LifetimePosition start,
                                   LifetimePosition until,
                                   LifetimePosition end) {
  CHECK(start.Value() < end.Value());
  LiveRange* second_part = SplitRangeAt(range, start);
  if (!AllocationOk()) return;

  if (second_part->Start().Value() < end.Value()) {
    LiveRange* third_part = SplitBetween(
        Max(second_part->Start().InstructionEnd(), until),
    if (!AllocationOk()) return;

    DCHECK(third_part != second_part);
    AddToUnhandledSorted(third_part);

  AddToUnhandledSorted(second_part);

void LAllocator::Spill(LiveRange* range) {
  DCHECK(!range->IsSpilled());
  TraceAlloc("Spilling live range %d\n", range->id());
  LiveRange* first = range->TopLevel();

  if (!first->HasAllocatedSpillOperand()) {
    LOperand* op = TryReuseSpillSlot(range);
    if (op == NULL) op = chunk_->GetNextSpillSlot(range->Kind());
    first->SetSpillOperand(op);
  range->MakeSpilled(chunk()->zone());

int LAllocator::RegisterCount() const {
  return num_registers_;

void LAllocator::Verify() const {
  for (int i = 0; i < live_ranges()->length(); ++i) {
    LiveRange* current = live_ranges()->at(i);
    if (current != NULL) current->Verify();

      allocator_(allocator) {
  if (FLAG_hydrogen_stats) {
        allocator->zone()->allocation_size();

  if (FLAG_hydrogen_stats) {
    isolate()->GetHStatistics()->SaveTiming(name(), base::TimeDelta(), size);
  if (ShouldProduceTraceOutput()) {
    isolate()->GetHTracer()->TraceLithium(name(), allocator_->chunk());