      register_beneficial_(true) {

void LiveRange::Verify() const {

bool LiveRange::HasOverlap(UseInterval* target) const {
  while (current_interval != NULL) {
    if (current_interval->Contains(target->start()) ||
        target->Contains(current_interval->start())) {
    current_interval = current_interval->next();

      is_non_loop_phi_(false),
      assigned_register_(kInvalidAssignment),
      first_interval_(NULL),
      current_interval_(NULL),
      last_processed_use_(NULL),
      current_hint_operand_(NULL),

  DCHECK(!operand->IsUnallocated());
  use_pos = use_pos->next();
  if (use_pos == NULL) return true;
  DCHECK(!op->IsUnallocated());
  if (to_start_of == NULL) return;
  bool split_at_start = false;
  while (current != NULL) {
    current->SplitAt(position, zone);
    if (next->start().Value() >= position.Value()) {
      split_at_start = (next->start().Value() == position.Value());
  if (split_at_start) {
    use_before = use_after;
    use_after = use_after->next();
    use_before = use_after;
    use_after = use_after->next();
  if (use_before != NULL) {
  if (pos == NULL) return false;
  if (other_pos == NULL) return true;
  RegisterAllocator::TraceAlloc("Shorten live range %d to [%d\n", id_,
  RegisterAllocator::TraceAlloc("Ensure live range %d in interval [%d %d[\n",
  RegisterAllocator::TraceAlloc("Add to live range %d interval [%d %d[\n", id_,
  RegisterAllocator::TraceAlloc("Add to live range %d use position %d\n", id_,
  prev_hint = current->HasHint() ? current : prev_hint;
  current = current->next();
  prev->next_ = use_pos;
  while (use_pos != NULL) {
    DCHECK(op->IsRegister() || op->IsDoubleRegister() ||
    use_pos = use_pos->next();
  if (!CanCover(position)) return false;
       interval = interval->next()) {
           interval->next()->start().Value() >= interval->start().Value());
    if (interval->Contains(position)) return true;
    if (interval->start().Value() > position.Value()) return false;
  if (cur_intersection.IsValid()) {
    return cur_intersection;
    : zone_(code->isolate()),
      live_in_sets_(code->BasicBlockCount(), zone()),
      live_ranges_(code->VirtualRegisterCount() * 2, zone()),
      fixed_live_ranges_(NULL),
      fixed_double_live_ranges_(NULL),
      unhandled_live_ranges_(code->VirtualRegisterCount() * 2, zone()),
      active_live_ranges_(8, zone()),
      inactive_live_ranges_(8, zone()),
      reusable_slots_(8, zone()),
      allocation_ok_(true) {}
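// Liveness analysis: each block gets a live-in BitVector; ComputeLiveOut
// unions the live-in sets of all successors and adds the operands that flow
// into successor phis along this block's incoming edge.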
void RegisterAllocator::InitializeLivenessAnalysis() {
  int block_count = code()->BasicBlockCount();
  live_in_sets_.Initialize(block_count, zone());
  live_in_sets_.AddBlock(NULL, block_count, zone());

BitVector* RegisterAllocator::ComputeLiveOut(BasicBlock* block) {
  BitVector* live_out =
      new (zone()) BitVector(code()->VirtualRegisterCount(), zone());
  BasicBlock::Successors successors = block->successors();
  for (BasicBlock::Successors::iterator i = successors.begin();
       i != successors.end(); ++i) {
    BasicBlock* successor = *i;
    BitVector* live_in = live_in_sets_[successor->rpo_number_];
    if (live_in != NULL) live_out->Union(*live_in);
    int index = successor->PredecessorIndexOf(block);
    DCHECK(index < static_cast<int>(successor->PredecessorCount()));
    for (BasicBlock::const_iterator j = successor->begin();
         j != successor->end(); ++j) {
      if (phi->opcode() != IrOpcode::kPhi) continue;
      Node* input = phi->InputAt(index);
      live_out->Add(input->id());
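// Every operand live out of a block initially gets a use interval covering the
// whole block; processing of the actual definitions later shortens it.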
void RegisterAllocator::AddInitialIntervals(BasicBlock* block,
                                            BitVector* live_out) {
  LifetimePosition start =
  BitVector::Iterator iterator(live_out);
  while (!iterator.Done()) {
    int operand_index = iterator.Current();
    LiveRange* range = LiveRangeFor(operand_index);
    range->AddUseInterval(start, end, zone());

int RegisterAllocator::FixedDoubleLiveRangeID(int index) {

InstructionOperand* RegisterAllocator::AllocateFixed(
    UnallocatedOperand* operand, int pos, bool is_tagged) {
  TraceAlloc("Allocating fixed reg for op %d\n", operand->virtual_register());
  DCHECK(operand->HasFixedPolicy());
  if (operand->HasFixedSlotPolicy()) {
        operand->fixed_slot_index());
  } else if (operand->HasFixedRegisterPolicy()) {
    int reg_index = operand->fixed_register_index();
  } else if (operand->HasFixedDoubleRegisterPolicy()) {
    int reg_index = operand->fixed_register_index();
    TraceAlloc("Fixed reg is tagged at %d\n", pos);
    Instruction* instr = InstructionAt(pos);
    if (instr->HasPointerMap()) {
      instr->pointer_map()->RecordPointer(operand, code_zone());
LiveRange* RegisterAllocator::FixedLiveRangeFor(int index) {
  LiveRange* result = fixed_live_ranges_[index];
  if (result == NULL) {
    result = new (zone()) LiveRange(FixedLiveRangeID(index), code_zone());
    DCHECK(result->IsFixed());
    SetLiveRangeAssignedRegister(result, index);
    fixed_live_ranges_[index] = result;

LiveRange* RegisterAllocator::FixedDoubleLiveRangeFor(int index) {
  LiveRange* result = fixed_double_live_ranges_[index];
  if (result == NULL) {
    result = new (zone()) LiveRange(FixedDoubleLiveRangeID(index), code_zone());
    DCHECK(result->IsFixed());
    SetLiveRangeAssignedRegister(result, index);
    fixed_double_live_ranges_[index] = result;

LiveRange* RegisterAllocator::LiveRangeFor(int index) {
  if (index >= live_ranges_.length()) {
    live_ranges_.AddBlock(NULL, index - live_ranges_.length() + 1, zone());
  LiveRange* result = live_ranges_[index];
  if (result == NULL) {
    result = new (zone()) LiveRange(index, code_zone());
    live_ranges_[index] = result;

GapInstruction* RegisterAllocator::GetLastGap(BasicBlock* block) {
  int last_instruction = block->last_instruction_index();
  return code()->GapAt(last_instruction - 1);

LiveRange* RegisterAllocator::LiveRangeFor(InstructionOperand* operand) {
  if (operand->IsUnallocated()) {
  } else if (operand->IsRegister()) {
    return FixedLiveRangeFor(operand->index());
  } else if (operand->IsDoubleRegister()) {
    return FixedDoubleLiveRangeFor(operand->index());
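// Define() starts (or shortens) a live range at a definition point, while
// Use() extends the range backwards from a use to the start of its block.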
void RegisterAllocator::Define(LifetimePosition position,
                               InstructionOperand* operand,
                               InstructionOperand* hint) {
  LiveRange* range = LiveRangeFor(operand);
  if (range == NULL) return;
  if (range->IsEmpty() || range->Start().Value() > position.Value()) {
    range->AddUseInterval(position, position.NextInstruction(), zone());
    range->AddUsePosition(position.NextInstruction(), NULL, NULL, zone());
    range->ShortenTo(position);
  if (operand->IsUnallocated()) {
    range->AddUsePosition(position, unalloc_operand, hint, zone());

void RegisterAllocator::Use(LifetimePosition block_start,
                            LifetimePosition position,
                            InstructionOperand* operand,
                            InstructionOperand* hint) {
  LiveRange* range = LiveRangeFor(operand);
  if (range == NULL) return;
  if (operand->IsUnallocated()) {
    range->AddUsePosition(position, unalloc_operand, hint, zone());
  range->AddUseInterval(block_start, position, zone());

void RegisterAllocator::AddConstraintsGapMove(int index,
                                              InstructionOperand* from,
                                              InstructionOperand* to) {
  GapInstruction* gap = code()->GapAt(index);
  if (from->IsUnallocated()) {
    const ZoneList<MoveOperands>* move_operands = move->move_operands();
    for (int i = 0; i < move_operands->length(); ++i) {
      MoveOperands cur = move_operands->at(i);
      InstructionOperand* cur_to = cur.destination();
      if (cur_to->IsUnallocated()) {
        move->AddMove(cur.source(), to, code_zone());
  move->AddMove(from, to, code_zone());
void RegisterAllocator::MeetRegisterConstraints(BasicBlock* block) {
  int start = block->first_instruction_index();
  int end = block->last_instruction_index();
  for (int i = start; i <= end; ++i) {
    if (code()->IsGapAt(i)) {
      Instruction* instr = NULL;
      Instruction* prev_instr = NULL;
      if (i < end) instr = InstructionAt(i + 1);
      if (i > start) prev_instr = InstructionAt(i - 1);
      MeetConstraintsBetween(prev_instr, instr, i);
      if (!AllocationOk()) return;
  if (!code()->IsGapAt(end)) {
    MeetRegisterConstraintsForLastInstructionInBlock(block);

void RegisterAllocator::MeetRegisterConstraintsForLastInstructionInBlock(
  int end = block->last_instruction_index();
  Instruction* last_instruction = InstructionAt(end);
  for (size_t i = 0; i < last_instruction->OutputCount(); i++) {
    InstructionOperand* output_operand = last_instruction->OutputAt(i);
    DCHECK(!output_operand->IsConstant());
    int output_vreg = output->virtual_register();
    LiveRange* range = LiveRangeFor(output_vreg);
    bool assigned = false;
    if (output->HasFixedPolicy()) {
      AllocateFixed(output, -1, false);
      if (output->IsStackSlot()) {
        range->SetSpillOperand(output);
        range->SetSpillStartIndex(end);
      BasicBlock::Successors successors = block->successors();
      for (BasicBlock::Successors::iterator succ = successors.begin();
           succ != successors.end(); ++succ) {
        DCHECK((*succ)->PredecessorCount() == 1);
        int gap_index = (*succ)->first_instruction_index() + 1;
        DCHECK(code()->IsGapAt(gap_index));
        UnallocatedOperand* output_copy =
        output_copy->set_virtual_register(output_vreg);
        code()->AddGapMove(gap_index, output, output_copy);
      BasicBlock::Successors successors = block->successors();
      for (BasicBlock::Successors::iterator succ = successors.begin();
           succ != successors.end(); ++succ) {
        DCHECK((*succ)->PredecessorCount() == 1);
        int gap_index = (*succ)->first_instruction_index() + 1;
        range->SetSpillStartIndex(gap_index);
        GapInstruction* gap = code()->GapAt(gap_index);
        move->AddMove(output, range->GetSpillOperand(), code_zone());
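// Fixed-policy operands are satisfied by pinning them with AllocateFixed and
// reconnecting the original virtual register through a gap move in the
// adjacent gap instruction.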
void RegisterAllocator::MeetConstraintsBetween(Instruction* first,
  for (size_t i = 0; i < first->TempCount(); i++) {
    if (temp->HasFixedPolicy()) {
      AllocateFixed(temp, gap_index - 1, false);
  for (size_t i = 0; i < first->OutputCount(); i++) {
    InstructionOperand* output = first->OutputAt(i);
    if (output->IsConstant()) {
      int output_vreg = output->index();
      LiveRange* range = LiveRangeFor(output_vreg);
      range->SetSpillStartIndex(gap_index - 1);
      range->SetSpillOperand(output);
      LiveRange* range = LiveRangeFor(first_output->virtual_register());
      bool assigned = false;
      if (first_output->HasFixedPolicy()) {
        UnallocatedOperand* output_copy =
            first_output->CopyUnconstrained(code_zone());
        bool is_tagged = HasTaggedValue(first_output->virtual_register());
        AllocateFixed(first_output, gap_index, is_tagged);
        if (first_output->IsStackSlot()) {
          range->SetSpillOperand(first_output);
          range->SetSpillStartIndex(gap_index - 1);
        code()->AddGapMove(gap_index, first_output, output_copy);
        range->SetSpillStartIndex(gap_index);
        GapInstruction* gap = code()->GapAt(gap_index);
        move->AddMove(first_output, range->GetSpillOperand(), code_zone());
  if (second != NULL) {
    for (size_t i = 0; i < second->InputCount(); i++) {
      InstructionOperand* input = second->InputAt(i);
      if (input->IsImmediate()) continue;
      if (cur_input->HasFixedPolicy()) {
        UnallocatedOperand* input_copy =
            cur_input->CopyUnconstrained(code_zone());
        bool is_tagged = HasTaggedValue(cur_input->virtual_register());
        AllocateFixed(cur_input, gap_index + 1, is_tagged);
        AddConstraintsGapMove(gap_index, input_copy, cur_input);
    for (size_t i = 0; i < second->OutputCount(); i++) {
      InstructionOperand* output = second->OutputAt(i);
      if (!output->IsUnallocated()) continue;
      if (second_output->HasSameAsInputPolicy()) {
        UnallocatedOperand* cur_input =
        int output_vreg = second_output->virtual_register();
        int input_vreg = cur_input->virtual_register();
        UnallocatedOperand* input_copy =
            cur_input->CopyUnconstrained(code_zone());
        cur_input->set_virtual_register(second_output->virtual_register());
        AddConstraintsGapMove(gap_index, input_copy, cur_input);
        if (HasTaggedValue(input_vreg) && !HasTaggedValue(output_vreg)) {
          int index = gap_index + 1;
          Instruction* instr = InstructionAt(index);
          if (instr->HasPointerMap()) {
            instr->pointer_map()->RecordPointer(input_copy, code_zone());
        } else if (!HasTaggedValue(input_vreg) && HasTaggedValue(output_vreg)) {
bool RegisterAllocator::IsOutputRegisterOf(Instruction* instr, int index) {
  for (size_t i = 0; i < instr->OutputCount(); i++) {
    InstructionOperand* output = instr->OutputAt(i);
    if (output->IsRegister() && output->index() == index) return true;

bool RegisterAllocator::IsOutputDoubleRegisterOf(Instruction* instr,
  for (size_t i = 0; i < instr->OutputCount(); i++) {
    InstructionOperand* output = instr->OutputAt(i);
    if (output->IsDoubleRegister() && output->index() == index) return true;
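// ProcessInstructions walks a block backwards: outputs define (and kill) live
// ranges, inputs create uses, and instructions that clobber registers add a
// short use interval to every fixed range they do not also define.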
void RegisterAllocator::ProcessInstructions(BasicBlock* block,
  int block_start = block->first_instruction_index();
  LifetimePosition block_start_position =
  for (int index = block->last_instruction_index(); index >= block_start;
    LifetimePosition curr_position =
    Instruction* instr = InstructionAt(index);
    if (instr->IsGapMoves()) {
      GapInstruction* gap = code()->GapAt(index);
      const ZoneList<MoveOperands>* move_operands = move->move_operands();
      for (int i = 0; i < move_operands->length(); ++i) {
        MoveOperands* cur = &move_operands->at(i);
        if (cur->IsIgnored()) continue;
        InstructionOperand* from = cur->source();
        InstructionOperand* to = cur->destination();
        InstructionOperand* hint = to;
        if (to->IsUnallocated()) {
          LiveRange* to_range = LiveRangeFor(to_vreg);
          if (to_range->is_phi()) {
            if (to_range->is_non_loop_phi()) {
              hint = to_range->current_hint_operand();
          if (live->Contains(to_vreg)) {
            Define(curr_position, to, from);
            live->Remove(to_vreg);
          Define(curr_position, to, from);
        Use(block_start_position, curr_position, from, hint);
        if (from->IsUnallocated()) {
      for (size_t i = 0; i < instr->OutputCount(); i++) {
        InstructionOperand* output = instr->OutputAt(i);
        if (output->IsUnallocated()) {
          live->Remove(out_vreg);
        } else if (output->IsConstant()) {
          int out_vreg = output->index();
          live->Remove(out_vreg);
        Define(curr_position, output, NULL);
      if (instr->ClobbersRegisters()) {
        if (!IsOutputRegisterOf(instr, i)) {
          LiveRange* range = FixedLiveRangeFor(i);
          range->AddUseInterval(curr_position, curr_position.InstructionEnd(),
      if (instr->ClobbersDoubleRegisters()) {
        if (!IsOutputDoubleRegisterOf(instr, i)) {
          LiveRange* range = FixedDoubleLiveRangeFor(i);
          range->AddUseInterval(curr_position, curr_position.InstructionEnd(),
      for (size_t i = 0; i < instr->InputCount(); i++) {
        InstructionOperand* input = instr->InputAt(i);
        if (input->IsImmediate()) continue;
        LifetimePosition use_pos;
        if (input->IsUnallocated() &&
          use_pos = curr_position;
          use_pos = curr_position.InstructionEnd();
        Use(block_start_position, use_pos, input, NULL);
        if (input->IsUnallocated()) {
      for (size_t i = 0; i < instr->TempCount(); i++) {
        InstructionOperand* temp = instr->TempAt(i);
        if (instr->ClobbersTemps()) {
          if (temp->IsRegister()) continue;
          if (temp->IsUnallocated()) {
            if (temp_unalloc->HasFixedPolicy()) {
        Use(block_start_position, curr_position.InstructionEnd(), temp, NULL);
        Define(curr_position, temp, NULL);
void RegisterAllocator::ResolvePhis(BasicBlock* block) {
  for (BasicBlock::const_iterator i = block->begin(); i != block->end(); ++i) {
    if (phi->opcode() != IrOpcode::kPhi) continue;
    UnallocatedOperand* phi_operand =
    phi_operand->set_virtual_register(phi->id());
    Node::Inputs inputs = phi->inputs();
    for (Node::Inputs::iterator iter(inputs.begin()); iter != inputs.end();
      if (j >= block->PredecessorCount()) continue;
      UnallocatedOperand* operand =
      operand->set_virtual_register(op->id());
      BasicBlock* cur_block = block->PredecessorAt(j);
      code()->AddGapMove(cur_block->last_instruction_index() - 1, operand,
      Instruction* branch = InstructionAt(cur_block->last_instruction_index());
      DCHECK(!branch->HasPointerMap());
    LiveRange* live_range = LiveRangeFor(phi->id());
    BlockStartInstruction* block_start = code()->GetBlockStart(block);
        ->AddMove(phi_operand, live_range->GetSpillOperand(), code_zone());
    live_range->SetSpillStartIndex(block->first_instruction_index());
    live_range->set_is_phi(true);
    if (!block->IsLoopHeader()) {
      live_range->set_is_non_loop_phi(true);
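// Top-level driver: meet register constraints, run the linear-scan passes for
// general and then double registers, and finally populate pointer maps and
// resolve control flow between the pieces of split ranges.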
bool RegisterAllocator::Allocate() {
  assigned_registers_ = new (code_zone())
  assigned_double_registers_ = new (code_zone())
  MeetRegisterConstraints();
  if (!AllocationOk()) return false;
  AllocateGeneralRegisters();
  if (!AllocationOk()) return false;
  AllocateDoubleRegisters();
  if (!AllocationOk()) return false;
  PopulatePointerMaps();
  ResolveControlFlow();
  code()->frame()->SetAllocatedRegisters(assigned_registers_);
  code()->frame()->SetAllocatedDoubleRegisters(assigned_double_registers_);

void RegisterAllocator::MeetRegisterConstraints() {
  RegisterAllocatorPhase phase("L_Register constraints", this);
  for (int i = 0; i < code()->BasicBlockCount(); ++i) {
    MeetRegisterConstraints(code()->BlockAt(i));
    if (!AllocationOk()) return;

void RegisterAllocator::ResolvePhis() {
  RegisterAllocatorPhase phase("L_Resolve phis", this);
  for (int i = code()->BasicBlockCount() - 1; i >= 0; --i) {
    ResolvePhis(code()->BlockAt(i));
void RegisterAllocator::ResolveControlFlow(LiveRange* range, BasicBlock* block,
  LifetimePosition pred_end =
  LifetimePosition cur_start =
  LiveRange* pred_cover = NULL;
  LiveRange* cur_cover = NULL;
  LiveRange* cur_range = range;
  while (cur_range != NULL && (cur_cover == NULL || pred_cover == NULL)) {
    if (cur_range->CanCover(cur_start)) {
      cur_cover = cur_range;
    if (cur_range->CanCover(pred_end)) {
      pred_cover = cur_range;
    cur_range = cur_range->next();
  if (cur_cover->IsSpilled()) return;
  if (pred_cover != cur_cover) {
    InstructionOperand* pred_op =
        pred_cover->CreateAssignedOperand(code_zone());
    InstructionOperand* cur_op = cur_cover->CreateAssignedOperand(code_zone());
    if (!pred_op->Equals(cur_op)) {
      GapInstruction* gap = NULL;
      if (block->PredecessorCount() == 1) {
        gap = code()->GapAt(block->first_instruction_index());
        DCHECK(pred->SuccessorCount() == 1);
        gap = GetLastGap(pred);
        Instruction* branch = InstructionAt(pred->last_instruction_index());
        DCHECK(!branch->HasPointerMap());
          ->AddMove(pred_op, cur_op, code_zone());

ParallelMove* RegisterAllocator::GetConnectingParallelMove(
    LifetimePosition pos) {
  int index = pos.InstructionIndex();
  if (code()->IsGapAt(index)) {
    GapInstruction* gap = code()->GapAt(index);
    return gap->GetOrCreateParallelMove(
  int gap_pos = pos.IsInstructionStart() ? (index - 1) : (index + 1);
  return code()->GapAt(gap_pos)->GetOrCreateParallelMove(

BasicBlock* RegisterAllocator::GetBlock(LifetimePosition pos) {
  return code()->GetBasicBlock(pos.InstructionIndex());
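// ConnectRanges inserts a move wherever a split child starts exactly where the
// previous piece ends, unless that point is a block boundary that the general
// control-flow resolution pass has to handle instead.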
void RegisterAllocator::ConnectRanges() {
  RegisterAllocatorPhase phase("L_Connect ranges", this);
  for (int i = 0; i < live_ranges()->length(); ++i) {
    LiveRange* first_range = live_ranges()->at(i);
    if (first_range == NULL || first_range->parent() != NULL) continue;
    LiveRange* second_range = first_range->next();
    while (second_range != NULL) {
      LifetimePosition pos = second_range->Start();
      if (!second_range->IsSpilled()) {
        if (first_range->End().Value() == pos.Value()) {
          bool should_insert = true;
          if (IsBlockBoundary(pos)) {
            should_insert = CanEagerlyResolveControlFlow(GetBlock(pos));
          if (should_insert) {
            ParallelMove* move = GetConnectingParallelMove(pos);
            InstructionOperand* prev_operand =
                first_range->CreateAssignedOperand(code_zone());
            InstructionOperand* cur_operand =
                second_range->CreateAssignedOperand(code_zone());
            move->AddMove(prev_operand, cur_operand, code_zone());
      first_range = second_range;
      second_range = second_range->next();

bool RegisterAllocator::CanEagerlyResolveControlFlow(BasicBlock* block) const {
  if (block->PredecessorCount() != 1) return false;
  return block->PredecessorAt(0)->rpo_number_ == block->rpo_number_ - 1;
void RegisterAllocator::ResolveControlFlow() {
  RegisterAllocatorPhase phase("L_Resolve control flow", this);
  for (int block_id = 1; block_id < code()->BasicBlockCount(); ++block_id) {
    BasicBlock* block = code()->BlockAt(block_id);
    if (CanEagerlyResolveControlFlow(block)) continue;
    BitVector* live = live_in_sets_[block->rpo_number_];
    BitVector::Iterator iterator(live);
    while (!iterator.Done()) {
      int operand_index = iterator.Current();
      BasicBlock::Predecessors predecessors = block->predecessors();
      for (BasicBlock::Predecessors::iterator i = predecessors.begin();
           i != predecessors.end(); ++i) {
        BasicBlock* cur = *i;
        LiveRange* cur_range = LiveRangeFor(operand_index);
        ResolveControlFlow(cur_range, block, cur);
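// Live ranges are built in reverse block order. For loop headers, every range
// live at the header is extended across the whole loop body so it survives the
// back edge; a value still live at block 0 is reported as an allocator error.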
void RegisterAllocator::BuildLiveRanges() {
  RegisterAllocatorPhase phase("L_Build live ranges", this);
  InitializeLivenessAnalysis();
  for (int block_id = code()->BasicBlockCount() - 1; block_id >= 0;
    BasicBlock* block = code()->BlockAt(block_id);
    BitVector* live = ComputeLiveOut(block);
    AddInitialIntervals(block, live);
    ProcessInstructions(block, live);
    for (BasicBlock::const_iterator i = block->begin(); i != block->end();
      if (phi->opcode() != IrOpcode::kPhi) continue;
      live->Remove(phi->id());
      InstructionOperand* hint = NULL;
      InstructionOperand* phi_operand = NULL;
      GapInstruction* gap = GetLastGap(block->PredecessorAt(0));
      ParallelMove* move =
      for (int j = 0; j < move->move_operands()->length(); ++j) {
        InstructionOperand* to = move->move_operands()->at(j).destination();
        if (to->IsUnallocated() &&
          hint = move->move_operands()->at(j).source();
          block->first_instruction_index());
      Define(block_start, phi_operand, hint);
    live_in_sets_[block_id] = live;
    if (block->IsLoopHeader()) {
      BitVector::Iterator iterator(live);
          block->first_instruction_index());
          code()->BlockAt(block->loop_end_)->last_instruction_index();
      LifetimePosition end =
      while (!iterator.Done()) {
        int operand_index = iterator.Current();
        LiveRange* range = LiveRangeFor(operand_index);
        range->EnsureInterval(start, end, zone());
      for (int i = block->rpo_number_ + 1; i < block->loop_end_; ++i) {
        live_in_sets_[i]->Union(*live);
    if (block_id == 0) {
      BitVector::Iterator iterator(live);
      while (!iterator.Done()) {
        int operand_index = iterator.Current();
        PrintF("Register allocator error: live v%d reached first block.\n",
        LiveRange* range = LiveRangeFor(operand_index);
        PrintF(" (first use is at %d)\n", range->first_pos()->pos().Value());
        CompilationInfo* info = code()->linkage()->info();
        if (info->IsStub()) {
          if (info->code_stub() == NULL) {
          CodeStub::Major major_key = info->code_stub()->MajorKey();
          PrintF(" (function: %s)\n", CodeStub::MajorName(major_key, false));
          DCHECK(info->IsOptimizing());
          PrintF(" (function: %s)\n",
                 info->function()->debug_name()->ToCString().get());
  for (int i = 0; i < live_ranges_.length(); ++i) {
    if (live_ranges_[i] != NULL) {
      live_ranges_[i]->kind_ = RequiredRegisterKind(live_ranges_[i]->id());
      LiveRange* range = live_ranges_[i];
      if (range->HasAllocatedSpillOperand() &&
          range->GetSpillOperand()->IsConstant()) {
        for (UsePosition* pos = range->first_pos(); pos != NULL;
          pos->register_beneficial_ = true;
          pos->requires_reg_ = true;
bool RegisterAllocator::SafePointsAreInOrder() const {
  for (PointerMapDeque::const_iterator it = pointer_maps->begin();
       it != pointer_maps->end(); ++it) {
    PointerMap* map = *it;
    if (safe_point > map->instruction_position()) return false;
    safe_point = map->instruction_position();
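// PopulatePointerMaps records, for every safe point covered by a range that
// holds a tagged value, either the spill slot or the assigned register, so the
// GC can find and update the pointer.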
void RegisterAllocator::PopulatePointerMaps() {
  RegisterAllocatorPhase phase("L_Populate pointer maps", this);
  DCHECK(SafePointsAreInOrder());
  int last_range_start = 0;
  PointerMapDeque::const_iterator first_it = pointer_maps->begin();
  for (int range_idx = 0; range_idx < live_ranges()->length(); ++range_idx) {
    LiveRange* range = live_ranges()->at(range_idx);
    if (range == NULL) continue;
    if (range->parent() != NULL) continue;
    if (!HasTaggedValue(range->id())) continue;
    if (range->IsEmpty()) continue;
    int start = range->Start().InstructionIndex();
    for (LiveRange* cur = range; cur != NULL; cur = cur->next()) {
      LifetimePosition this_end = cur->End();
      if (this_end.InstructionIndex() > end) end = this_end.InstructionIndex();
      DCHECK(cur->Start().InstructionIndex() >= start);
    if (start < last_range_start) first_it = pointer_maps->begin();
    last_range_start = start;
    for (; first_it != pointer_maps->end(); ++first_it) {
      PointerMap* map = *first_it;
      if (map->instruction_position() >= start) break;
    for (PointerMapDeque::const_iterator it = first_it;
         it != pointer_maps->end(); ++it) {
      PointerMap* map = *it;
      int safe_point = map->instruction_position();
      if (safe_point - 1 > end) break;
      LifetimePosition safe_point_pos =
      LiveRange* cur = range;
      while (cur != NULL && !cur->Covers(safe_point_pos)) {
      if (cur == NULL) continue;
      if (range->HasAllocatedSpillOperand() &&
          safe_point >= range->spill_start_index() &&
          !range->GetSpillOperand()->IsConstant()) {
        TraceAlloc("Pointer for range %d (spilled at %d) at safe point %d\n",
                   range->id(), range->spill_start_index(), safe_point);
        map->RecordPointer(range->GetSpillOperand(), code_zone());
      if (!cur->IsSpilled()) {
        TraceAlloc(
            "Pointer in register for range %d (start at %d) "
            "at safe point %d\n",
            cur->id(), cur->Start().Value(), safe_point);
        InstructionOperand* operand = cur->CreateAssignedOperand(code_zone());
        DCHECK(!operand->IsStackSlot());
        map->RecordPointer(operand, code_zone());
void RegisterAllocator::AllocateGeneralRegisters() {
  RegisterAllocatorPhase phase("L_Allocate general registers", this);
  AllocateRegisters();

void RegisterAllocator::AllocateDoubleRegisters() {
  RegisterAllocatorPhase phase("L_Allocate double registers", this);
  AllocateRegisters();
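// Main linear-scan loop: ranges are pulled from the unhandled list, which is
// kept sorted by start position; active ranges that have ended are retired,
// ones that no longer cover the current position become inactive, and a free
// register is tried before falling back to AllocateBlockedReg.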
void RegisterAllocator::AllocateRegisters() {
  DCHECK(unhandled_live_ranges_.is_empty());
  for (int i = 0; i < live_ranges_.length(); ++i) {
    if (live_ranges_[i] != NULL) {
      if (live_ranges_[i]->Kind() == mode_) {
        AddToUnhandledUnsorted(live_ranges_[i]);
  DCHECK(UnhandledIsSorted());
  DCHECK(reusable_slots_.is_empty());
  DCHECK(active_live_ranges_.is_empty());
  DCHECK(inactive_live_ranges_.is_empty());
    LiveRange* current = fixed_double_live_ranges_.at(i);
    if (current != NULL) {
      AddToInactive(current);
  for (int i = 0; i < fixed_live_ranges_.length(); ++i) {
    LiveRange* current = fixed_live_ranges_.at(i);
    if (current != NULL) {
      AddToInactive(current);
  while (!unhandled_live_ranges_.is_empty()) {
    DCHECK(UnhandledIsSorted());
    LiveRange* current = unhandled_live_ranges_.RemoveLast();
    DCHECK(UnhandledIsSorted());
    LifetimePosition position = current->Start();
    allocation_finger_ = position;
    TraceAlloc("Processing interval %d start=%d\n", current->id(),
    if (current->HasAllocatedSpillOperand()) {
      TraceAlloc("Live range %d already has a spill operand\n", current->id());
      LifetimePosition next_pos = position;
      if (code()->IsGapAt(next_pos.InstructionIndex())) {
        next_pos = next_pos.NextInstruction();
      UsePosition* pos = current->NextUsePositionRegisterIsBeneficial(next_pos);
      } else if (pos->pos().Value() >
                 current->Start().NextInstruction().Value()) {
        SpillBetween(current, current->Start(), pos->pos());
        if (!AllocationOk()) return;
        DCHECK(UnhandledIsSorted());
    for (int i = 0; i < active_live_ranges_.length(); ++i) {
      LiveRange* cur_active = active_live_ranges_.at(i);
      if (cur_active->End().Value() <= position.Value()) {
        ActiveToHandled(cur_active);
      } else if (!cur_active->Covers(position)) {
        ActiveToInactive(cur_active);
    for (int i = 0; i < inactive_live_ranges_.length(); ++i) {
      LiveRange* cur_inactive = inactive_live_ranges_.at(i);
      if (cur_inactive->End().Value() <= position.Value()) {
        InactiveToHandled(cur_inactive);
      } else if (cur_inactive->Covers(position)) {
        InactiveToActive(cur_inactive);
    DCHECK(!current->HasRegisterAssigned() && !current->IsSpilled());
    bool result = TryAllocateFreeReg(current);
    if (!AllocationOk()) return;
    if (!result) AllocateBlockedReg(current);
    if (!AllocationOk()) return;
    if (current->HasRegisterAssigned()) {
      AddToActive(current);
  reusable_slots_.Rewind(0);
  active_live_ranges_.Rewind(0);
  inactive_live_ranges_.Rewind(0);
const char* RegisterAllocator::RegisterName(int allocation_index) {

void RegisterAllocator::TraceAlloc(const char* msg, ...) {
  if (FLAG_trace_alloc) {
    va_start(arguments, msg);

bool RegisterAllocator::HasTaggedValue(int virtual_register) const {
  return code()->IsReference(virtual_register);

    int virtual_register) const {

void RegisterAllocator::AddToActive(LiveRange* range) {
  TraceAlloc("Add live range %d to active\n", range->id());
  active_live_ranges_.Add(range, zone());

void RegisterAllocator::AddToInactive(LiveRange* range) {
  TraceAlloc("Add live range %d to inactive\n", range->id());
  inactive_live_ranges_.Add(range, zone());

void RegisterAllocator::AddToUnhandledSorted(LiveRange* range) {
  if (range == NULL || range->IsEmpty()) return;
  DCHECK(!range->HasRegisterAssigned() && !range->IsSpilled());
  DCHECK(allocation_finger_.Value() <= range->Start().Value());
  for (int i = unhandled_live_ranges_.length() - 1; i >= 0; --i) {
    LiveRange* cur_range = unhandled_live_ranges_.at(i);
    if (range->ShouldBeAllocatedBefore(cur_range)) {
      TraceAlloc("Add live range %d to unhandled at %d\n", range->id(), i + 1);
      unhandled_live_ranges_.InsertAt(i + 1, range, zone());
      DCHECK(UnhandledIsSorted());
  TraceAlloc("Add live range %d to unhandled at start\n", range->id());
  unhandled_live_ranges_.InsertAt(0, range, zone());
  DCHECK(UnhandledIsSorted());

void RegisterAllocator::AddToUnhandledUnsorted(LiveRange* range) {
  if (range == NULL || range->IsEmpty()) return;
  DCHECK(!range->HasRegisterAssigned() && !range->IsSpilled());
  TraceAlloc("Add live range %d to unhandled unsorted at end\n", range->id());
  unhandled_live_ranges_.Add(range, zone());

  DCHECK(!(*a)->ShouldBeAllocatedBefore(*b) ||
         !(*b)->ShouldBeAllocatedBefore(*a));
  if ((*a)->ShouldBeAllocatedBefore(*b)) return 1;
  if ((*b)->ShouldBeAllocatedBefore(*a)) return -1;
  return (*a)->id() - (*b)->id();

void RegisterAllocator::SortUnhandled() {
  TraceAlloc("Sort unhandled\n");

bool RegisterAllocator::UnhandledIsSorted() {
  int len = unhandled_live_ranges_.length();
  for (int i = 1; i < len; i++) {
    LiveRange* a = unhandled_live_ranges_.at(i - 1);
    LiveRange* b = unhandled_live_ranges_.at(i);
    if (a->Start().Value() < b->Start().Value()) return false;

void RegisterAllocator::FreeSpillSlot(LiveRange* range) {
  if (range->next() != NULL) return;
  if (!range->TopLevel()->HasAllocatedSpillOperand()) return;
  InstructionOperand* spill_operand = range->TopLevel()->GetSpillOperand();
  if (spill_operand->IsConstant()) return;
  if (spill_operand->index() >= 0) {
    reusable_slots_.Add(range, zone());

InstructionOperand* RegisterAllocator::TryReuseSpillSlot(LiveRange* range) {
  if (reusable_slots_.is_empty()) return NULL;
  if (reusable_slots_.first()->End().Value() >
      range->TopLevel()->Start().Value()) {
  InstructionOperand* result =
      reusable_slots_.first()->TopLevel()->GetSpillOperand();
  reusable_slots_.Remove(0);

void RegisterAllocator::ActiveToHandled(LiveRange* range) {
  DCHECK(active_live_ranges_.Contains(range));
  active_live_ranges_.RemoveElement(range);
  TraceAlloc("Moving live range %d from active to handled\n", range->id());
  FreeSpillSlot(range);

void RegisterAllocator::ActiveToInactive(LiveRange* range) {
  DCHECK(active_live_ranges_.Contains(range));
  active_live_ranges_.RemoveElement(range);
  inactive_live_ranges_.Add(range, zone());
  TraceAlloc("Moving live range %d from active to inactive\n", range->id());

void RegisterAllocator::InactiveToHandled(LiveRange* range) {
  DCHECK(inactive_live_ranges_.Contains(range));
  inactive_live_ranges_.RemoveElement(range);
  TraceAlloc("Moving live range %d from inactive to handled\n", range->id());
  FreeSpillSlot(range);

void RegisterAllocator::InactiveToActive(LiveRange* range) {
  DCHECK(inactive_live_ranges_.Contains(range));
  inactive_live_ranges_.RemoveElement(range);
  active_live_ranges_.Add(range, zone());
  TraceAlloc("Moving live range %d from inactive to active\n", range->id());
bool RegisterAllocator::TryAllocateFreeReg(LiveRange* current) {
  for (int i = 0; i < num_registers_; i++) {
  for (int i = 0; i < active_live_ranges_.length(); ++i) {
    LiveRange* cur_active = active_live_ranges_.at(i);
  for (int i = 0; i < inactive_live_ranges_.length(); ++i) {
    LiveRange* cur_inactive = inactive_live_ranges_.at(i);
    LifetimePosition next_intersection =
        cur_inactive->FirstIntersection(current);
    if (!next_intersection.IsValid()) continue;
    int cur_reg = cur_inactive->assigned_register();
    free_until_pos[cur_reg] = Min(free_until_pos[cur_reg], next_intersection);
  InstructionOperand* hint = current->FirstHint();
  if (hint != NULL && (hint->IsRegister() || hint->IsDoubleRegister())) {
    int register_index = hint->index();
        "Found reg hint %s (free until [%d) for live range %d (end %d[).\n",
        RegisterName(register_index), free_until_pos[register_index].Value(),
    if (free_until_pos[register_index].Value() >= current->End().Value()) {
      TraceAlloc("Assigning preferred reg %s to live range %d\n",
                 RegisterName(register_index), current->id());
      SetLiveRangeAssignedRegister(current, register_index);
  for (int i = 1; i < RegisterCount(); ++i) {
    if (free_until_pos[i].Value() > free_until_pos[reg].Value()) {
  LifetimePosition pos = free_until_pos[reg];
  if (pos.Value() <= current->Start().Value()) {
  if (pos.Value() < current->End().Value()) {
    LiveRange* tail = SplitRangeAt(current, pos);
    if (!AllocationOk()) return false;
    AddToUnhandledSorted(tail);
  TraceAlloc("Assigning free reg %s to live range %d\n", RegisterName(reg),
  SetLiveRangeAssignedRegister(current, reg);
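// AllocateBlockedReg picks the register whose next beneficial use is farthest
// away. If even that use comes before the current range's first register use,
// the current range is spilled up to that use instead; otherwise the register
// is taken and intersecting ranges are split and spilled.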
void RegisterAllocator::AllocateBlockedReg(LiveRange* current) {
  UsePosition* register_use = current->NextRegisterPosition(current->Start());
  if (register_use == NULL) {
  for (int i = 0; i < num_registers_; i++) {
  for (int i = 0; i < active_live_ranges_.length(); ++i) {
    LiveRange* range = active_live_ranges_[i];
    int cur_reg = range->assigned_register();
    if (range->IsFixed() || !range->CanBeSpilled(current->Start())) {
      block_pos[cur_reg] = use_pos[cur_reg] =
      UsePosition* next_use =
          range->NextUsePositionRegisterIsBeneficial(current->Start());
      if (next_use == NULL) {
        use_pos[cur_reg] = range->End();
        use_pos[cur_reg] = next_use->pos();
  for (int i = 0; i < inactive_live_ranges_.length(); ++i) {
    LiveRange* range = inactive_live_ranges_.at(i);
    DCHECK(range->End().Value() > current->Start().Value());
    LifetimePosition next_intersection = range->FirstIntersection(current);
    if (!next_intersection.IsValid()) continue;
    int cur_reg = range->assigned_register();
    if (range->IsFixed()) {
      block_pos[cur_reg] = Min(block_pos[cur_reg], next_intersection);
      use_pos[cur_reg] = Min(block_pos[cur_reg], use_pos[cur_reg]);
      use_pos[cur_reg] = Min(use_pos[cur_reg], next_intersection);
  for (int i = 1; i < RegisterCount(); ++i) {
    if (use_pos[i].Value() > use_pos[reg].Value()) {
  LifetimePosition pos = use_pos[reg];
  if (pos.Value() < register_use->pos().Value()) {
    SpillBetween(current, current->Start(), register_use->pos());
  if (block_pos[reg].Value() < current->End().Value()) {
    LiveRange* tail = SplitBetween(current, current->Start(),
                                   block_pos[reg].InstructionStart());
    if (!AllocationOk()) return;
    AddToUnhandledSorted(tail);
  DCHECK(block_pos[reg].Value() >= current->End().Value());
  TraceAlloc("Assigning blocked reg %s to live range %d\n", RegisterName(reg),
  SetLiveRangeAssignedRegister(current, reg);
  SplitAndSpillIntersecting(current);
LifetimePosition RegisterAllocator::FindOptimalSpillingPos(
    LiveRange* range, LifetimePosition pos) {
  BasicBlock* block = GetBlock(pos.InstructionStart());
  BasicBlock* loop_header =
      block->IsLoopHeader() ? block : code()->GetContainingLoop(block);
  if (loop_header == NULL) return pos;
  UsePosition* prev_use = range->PreviousUsePositionRegisterIsBeneficial(pos);
  while (loop_header != NULL) {
        loop_header->first_instruction_index());
    if (range->Covers(loop_start)) {
      if (prev_use == NULL || prev_use->pos().Value() < loop_start.Value()) {
    loop_header = code()->GetContainingLoop(loop_header);

void RegisterAllocator::SplitAndSpillIntersecting(LiveRange* current) {
  DCHECK(current->HasRegisterAssigned());
  int reg = current->assigned_register();
  LifetimePosition split_pos = current->Start();
  for (int i = 0; i < active_live_ranges_.length(); ++i) {
    LiveRange* range = active_live_ranges_[i];
    if (range->assigned_register() == reg) {
      UsePosition* next_pos = range->NextRegisterPosition(current->Start());
      LifetimePosition spill_pos = FindOptimalSpillingPos(range, split_pos);
      if (next_pos == NULL) {
        SpillAfter(range, spill_pos);
        SpillBetweenUntil(range, spill_pos, current->Start(), next_pos->pos());
      if (!AllocationOk()) return;
      ActiveToHandled(range);
  for (int i = 0; i < inactive_live_ranges_.length(); ++i) {
    LiveRange* range = inactive_live_ranges_[i];
    DCHECK(range->End().Value() > current->Start().Value());
    if (range->assigned_register() == reg && !range->IsFixed()) {
      LifetimePosition next_intersection = range->FirstIntersection(current);
      if (next_intersection.IsValid()) {
        UsePosition* next_pos = range->NextRegisterPosition(current->Start());
        if (next_pos == NULL) {
          SpillAfter(range, split_pos);
          next_intersection = Min(next_intersection, next_pos->pos());
          SpillBetween(range, split_pos, next_intersection);
        if (!AllocationOk()) return;
        InactiveToHandled(range);
bool RegisterAllocator::IsBlockBoundary(LifetimePosition pos) {
  return pos.IsInstructionStart() &&
         InstructionAt(pos.InstructionIndex())->IsBlockStart();

LiveRange* RegisterAllocator::SplitRangeAt(LiveRange* range,
                                           LifetimePosition pos) {
  DCHECK(!range->IsFixed());
  TraceAlloc("Splitting live range %d at %d\n", range->id(), pos.Value());
  if (pos.Value() <= range->Start().Value()) return range;
  DCHECK(pos.IsInstructionStart() ||
         !InstructionAt(pos.InstructionIndex())->IsControl());
  int vreg = GetVirtualRegister();
  if (!AllocationOk()) return NULL;
  LiveRange* result = LiveRangeFor(vreg);
  range->SplitAt(pos, result, zone());

LiveRange* RegisterAllocator::SplitBetween(LiveRange* range,
                                           LifetimePosition start,
                                           LifetimePosition end) {
  DCHECK(!range->IsFixed());
  TraceAlloc("Splitting live range %d in position between [%d, %d]\n",
             range->id(), start.Value(), end.Value());
  LifetimePosition split_pos = FindOptimalSplitPos(start, end);
  DCHECK(split_pos.Value() >= start.Value());
  return SplitRangeAt(range, split_pos);

LifetimePosition RegisterAllocator::FindOptimalSplitPos(LifetimePosition start,
                                                        LifetimePosition end) {
  int start_instr = start.InstructionIndex();
  int end_instr = end.InstructionIndex();
  DCHECK(start_instr <= end_instr);
  if (start_instr == end_instr) return end;
  BasicBlock* start_block = GetBlock(start);
  BasicBlock* end_block = GetBlock(end);
  if (end_block == start_block) {
  BasicBlock* block = end_block;
  while (code()->GetContainingLoop(block) != NULL &&
         code()->GetContainingLoop(block)->rpo_number_ >
             start_block->rpo_number_) {
    block = code()->GetContainingLoop(block);
  if (block == end_block && !end_block->IsLoopHeader()) return end;
      block->first_instruction_index());
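// Spill helpers: SpillBetweenUntil splits the range at the start of the
// conflict, splits again after the conflict at a position chosen by
// FindOptimalSplitPos (which prefers hoisting out of enclosing loops), and
// re-queues whichever trailing piece still needs a register.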
void RegisterAllocator::SpillAfter(LiveRange* range, LifetimePosition pos) {
  LiveRange* second_part = SplitRangeAt(range, pos);
  if (!AllocationOk()) return;

void RegisterAllocator::SpillBetween(LiveRange* range, LifetimePosition start,
                                     LifetimePosition end) {
  SpillBetweenUntil(range, start, start, end);

void RegisterAllocator::SpillBetweenUntil(LiveRange* range,
                                          LifetimePosition start,
                                          LifetimePosition until,
                                          LifetimePosition end) {
  CHECK(start.Value() < end.Value());
  LiveRange* second_part = SplitRangeAt(range, start);
  if (!AllocationOk()) return;
  if (second_part->Start().Value() < end.Value()) {
    LiveRange* third_part = SplitBetween(
        second_part, Max(second_part->Start().InstructionEnd(), until),
    if (!AllocationOk()) return;
    DCHECK(third_part != second_part);
    AddToUnhandledSorted(third_part);
    AddToUnhandledSorted(second_part);

void RegisterAllocator::Spill(LiveRange* range) {
  DCHECK(!range->IsSpilled());
  TraceAlloc("Spilling live range %d\n", range->id());
  LiveRange* first = range->TopLevel();
  if (!first->HasAllocatedSpillOperand()) {
    InstructionOperand* op = TryReuseSpillSlot(range);
      op = DoubleStackSlotOperand::Create(index, zone());
      op = StackSlotOperand::Create(index, zone());
    first->SetSpillOperand(op);
  range->MakeSpilled(code_zone());

int RegisterAllocator::RegisterCount() const { return num_registers_; }

void RegisterAllocator::Verify() const {
  for (int i = 0; i < live_ranges()->length(); ++i) {
    LiveRange* current = live_ranges()->at(i);
    if (current != NULL) current->Verify();

void RegisterAllocator::SetLiveRangeAssignedRegister(LiveRange* range,
    assigned_double_registers_->Add(reg);
    assigned_registers_->Add(reg);
  range->set_assigned_register(reg, code_zone());

    RegisterAllocator* allocator)
      allocator_(allocator) {
  if (FLAG_turbo_stats) {
        allocator->zone()->allocation_size();
  if (FLAG_turbo_stats) {
    isolate()->GetTStatistics()->SaveTiming(name(), base::TimeDelta(), size);