register-allocator.cc
1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/compiler/register-allocator.h"
6 
7 #include "src/compiler/linkage.h"
8 #include "src/hydrogen.h"
9 #include "src/string-stream.h"
10 
11 namespace v8 {
12 namespace internal {
13 namespace compiler {
14 
15 static inline LifetimePosition Min(LifetimePosition a, LifetimePosition b) {
16  return a.Value() < b.Value() ? a : b;
17 }
18 
19 
20 static inline LifetimePosition Max(LifetimePosition a, LifetimePosition b) {
21  return a.Value() > b.Value() ? a : b;
22 }
23 
24 
25 UsePosition::UsePosition(LifetimePosition pos, InstructionOperand* operand,
26  InstructionOperand* hint)
27  : operand_(operand),
28  hint_(hint),
29  pos_(pos),
30  next_(NULL),
31  requires_reg_(false),
32  register_beneficial_(true) {
33  if (operand_ != NULL && operand_->IsUnallocated()) {
34  const UnallocatedOperand* unalloc = UnallocatedOperand::cast(operand_);
35  requires_reg_ = unalloc->HasRegisterPolicy();
36  register_beneficial_ = !unalloc->HasAnyPolicy();
37  }
38  DCHECK(pos_.IsValid());
39 }
40 
41 
42 bool UsePosition::HasHint() const {
43  return hint_ != NULL && !hint_->IsUnallocated();
44 }
45 
46 
47 bool UsePosition::RequiresRegister() const { return requires_reg_; }
48 
49 
50 bool UsePosition::RegisterIsBeneficial() const { return register_beneficial_; }
51 
52 
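// Splits this interval at |pos|: a new interval covering [pos, end_[ is
// linked in after this one, and this interval is shortened to end at |pos|.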
53 void UseInterval::SplitAt(LifetimePosition pos, Zone* zone) {
54  DCHECK(Contains(pos) && pos.Value() != start().Value());
55  UseInterval* after = new (zone) UseInterval(pos, end_);
56  after->next_ = next_;
57  next_ = after;
58  end_ = pos;
59 }
60 
61 
62 #ifdef DEBUG
63 
64 
65 void LiveRange::Verify() const {
66  UsePosition* cur = first_pos_;
67  while (cur != NULL) {
68  DCHECK(Start().Value() <= cur->pos().Value() &&
69  cur->pos().Value() <= End().Value());
70  cur = cur->next();
71  }
72 }
73 
74 
75 bool LiveRange::HasOverlap(UseInterval* target) const {
76  UseInterval* current_interval = first_interval_;
77  while (current_interval != NULL) {
78  // Intervals overlap if the start of one is contained in the other.
79  if (current_interval->Contains(target->start()) ||
80  target->Contains(current_interval->start())) {
81  return true;
82  }
83  current_interval = current_interval->next();
84  }
85  return false;
86 }
87 
88 
89 #endif
90 
91 
92 LiveRange::LiveRange(int id, Zone* zone)
93  : id_(id),
94  spilled_(false),
95  is_phi_(false),
96  is_non_loop_phi_(false),
97  kind_(UNALLOCATED_REGISTERS),
98  assigned_register_(kInvalidAssignment),
99  last_interval_(NULL),
100  first_interval_(NULL),
101  first_pos_(NULL),
102  parent_(NULL),
103  next_(NULL),
104  current_interval_(NULL),
105  last_processed_use_(NULL),
106  current_hint_operand_(NULL),
107  spill_operand_(new (zone) InstructionOperand()),
108  spill_start_index_(kMaxInt) {}
109 
110 
111 void LiveRange::set_assigned_register(int reg, Zone* zone) {
112  DCHECK(!HasRegisterAssigned() && !IsSpilled());
113  assigned_register_ = reg;
114  ConvertOperands(zone);
115 }
116 
117 
118 void LiveRange::MakeSpilled(Zone* zone) {
119  DCHECK(!IsSpilled());
120  DCHECK(TopLevel()->HasAllocatedSpillOperand());
121  spilled_ = true;
122  assigned_register_ = kInvalidAssignment;
123  ConvertOperands(zone);
124 }
125 
126 
127 bool LiveRange::HasAllocatedSpillOperand() const {
128  DCHECK(spill_operand_ != NULL);
129  return !spill_operand_->IsIgnored();
130 }
131 
132 
133 void LiveRange::SetSpillOperand(InstructionOperand* operand) {
134  DCHECK(!operand->IsUnallocated());
135  DCHECK(spill_operand_ != NULL);
136  DCHECK(spill_operand_->IsIgnored());
137  spill_operand_->ConvertTo(operand->kind(), operand->index());
138 }
139 
140 
141 UsePosition* LiveRange::NextUsePosition(LifetimePosition start) {
142  UsePosition* use_pos = last_processed_use_;
143  if (use_pos == NULL) use_pos = first_pos();
144  while (use_pos != NULL && use_pos->pos().Value() < start.Value()) {
145  use_pos = use_pos->next();
146  }
147  last_processed_use_ = use_pos;
148  return use_pos;
149 }
150 
151 
152 UsePosition* LiveRange::NextUsePositionRegisterIsBeneficial(
153  LifetimePosition start) {
154  UsePosition* pos = NextUsePosition(start);
155  while (pos != NULL && !pos->RegisterIsBeneficial()) {
156  pos = pos->next();
157  }
158  return pos;
159 }
160 
161 
162 UsePosition* LiveRange::PreviousUsePositionRegisterIsBeneficial(
163  LifetimePosition start) {
164  UsePosition* pos = first_pos();
165  UsePosition* prev = NULL;
166  while (pos != NULL && pos->pos().Value() < start.Value()) {
167  if (pos->RegisterIsBeneficial()) prev = pos;
168  pos = pos->next();
169  }
170  return prev;
171 }
172 
173 
174 UsePosition* LiveRange::NextRegisterPosition(LifetimePosition start) {
175  UsePosition* pos = NextUsePosition(start);
176  while (pos != NULL && !pos->RequiresRegister()) {
177  pos = pos->next();
178  }
179  return pos;
180 }
181 
182 
183 bool LiveRange::CanBeSpilled(LifetimePosition pos) {
184  // We cannot spill a live range that has a use requiring a register
185  // at the current or the immediate next position.
186  UsePosition* use_pos = NextRegisterPosition(pos);
187  if (use_pos == NULL) return true;
188  return use_pos->pos().Value() >
189  pos.NextInstruction().InstructionEnd().Value();
190 }
191 
192 
193 InstructionOperand* LiveRange::CreateAssignedOperand(Zone* zone) {
194  InstructionOperand* op = NULL;
195  if (HasRegisterAssigned()) {
196  DCHECK(!IsSpilled());
197  switch (Kind()) {
198  case GENERAL_REGISTERS:
199  op = RegisterOperand::Create(assigned_register(), zone);
200  break;
201  case DOUBLE_REGISTERS:
202  op = DoubleRegisterOperand::Create(assigned_register(), zone);
203  break;
204  default:
205  UNREACHABLE();
206  }
207  } else if (IsSpilled()) {
208  DCHECK(!HasRegisterAssigned());
209  op = TopLevel()->GetSpillOperand();
210  DCHECK(!op->IsUnallocated());
211  } else {
212  UnallocatedOperand* unalloc =
213  new (zone) UnallocatedOperand(UnallocatedOperand::NONE);
214  unalloc->set_virtual_register(id_);
215  op = unalloc;
216  }
217  return op;
218 }
219 
220 
221 UseInterval* LiveRange::FirstSearchIntervalForPosition(
222  LifetimePosition position) const {
223  if (current_interval_ == NULL) return first_interval_;
224  if (current_interval_->start().Value() > position.Value()) {
225  current_interval_ = NULL;
226  return first_interval_;
227  }
228  return current_interval_;
229 }
230 
231 
232 void LiveRange::AdvanceLastProcessedMarker(
233  UseInterval* to_start_of, LifetimePosition but_not_past) const {
234  if (to_start_of == NULL) return;
235  if (to_start_of->start().Value() > but_not_past.Value()) return;
236  LifetimePosition start = current_interval_ == NULL
237  ? LifetimePosition::Invalid()
238  : current_interval_->start();
239  if (to_start_of->start().Value() > start.Value()) {
240  current_interval_ = to_start_of;
241  }
242 }
243 
244 
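// Splits this live range at |position|, moving the use intervals and use
// positions at or after the split into |result| and linking |result| into
// this range's chain of children.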
245 void LiveRange::SplitAt(LifetimePosition position, LiveRange* result,
246  Zone* zone) {
247  DCHECK(Start().Value() < position.Value());
248  DCHECK(result->IsEmpty());
249  // Find the last interval that ends before the position. If the
250  // position is contained in one of the intervals in the chain, we
251  // split that interval and use the first part.
252  UseInterval* current = FirstSearchIntervalForPosition(position);
253 
254  // If the split position coincides with the beginning of a use interval
255  // we need to split use positions in a special way.
256  bool split_at_start = false;
257 
258  if (current->start().Value() == position.Value()) {
259  // When splitting at start we need to locate the previous use interval.
260  current = first_interval_;
261  }
262 
263  while (current != NULL) {
264  if (current->Contains(position)) {
265  current->SplitAt(position, zone);
266  break;
267  }
268  UseInterval* next = current->next();
269  if (next->start().Value() >= position.Value()) {
270  split_at_start = (next->start().Value() == position.Value());
271  break;
272  }
273  current = next;
274  }
275 
276  // Partition original use intervals to the two live ranges.
277  UseInterval* before = current;
278  UseInterval* after = before->next();
279  result->last_interval_ =
280  (last_interval_ == before)
281  ? after // Only interval in the range after split.
282  : last_interval_; // Last interval of the original range.
283  result->first_interval_ = after;
284  last_interval_ = before;
285 
286  // Find the last use position before the split and the first use
287  // position after it.
288  UsePosition* use_after = first_pos_;
289  UsePosition* use_before = NULL;
290  if (split_at_start) {
291  // The split position coincides with the beginning of a use interval (the
292  // end of a lifetime hole). Use at this position should be attributed to
293  // the split child because split child owns use interval covering it.
294  while (use_after != NULL && use_after->pos().Value() < position.Value()) {
295  use_before = use_after;
296  use_after = use_after->next();
297  }
298  } else {
299  while (use_after != NULL && use_after->pos().Value() <= position.Value()) {
300  use_before = use_after;
301  use_after = use_after->next();
302  }
303  }
304 
305  // Partition original use positions to the two live ranges.
306  if (use_before != NULL) {
307  use_before->next_ = NULL;
308  } else {
309  first_pos_ = NULL;
310  }
311  result->first_pos_ = use_after;
312 
313  // Discard cached iteration state. It might be pointing
314  // to the use that no longer belongs to this live range.
315  last_processed_use_ = NULL;
316  current_interval_ = NULL;
317 
318  // Link the new live range in the chain before any of the other
319  // ranges linked from the range before the split.
320  result->parent_ = (parent_ == NULL) ? this : parent_;
321  result->kind_ = result->parent_->kind_;
322  result->next_ = next_;
323  next_ = result;
324 
325 #ifdef DEBUG
326  Verify();
327  result->Verify();
328 #endif
329 }
330 
331 
332 // This implements an ordering on live ranges so that they are ordered by their
333 // start positions. This is needed for the correctness of the register
334 // allocation algorithm. If two live ranges start at the same offset then there
335 // is a tie breaker based on where the value is first used. This part of the
336 // ordering is merely a heuristic.
337 bool LiveRange::ShouldBeAllocatedBefore(const LiveRange* other) const {
338  LifetimePosition start = Start();
339  LifetimePosition other_start = other->Start();
340  if (start.Value() == other_start.Value()) {
341  UsePosition* pos = first_pos();
342  if (pos == NULL) return false;
343  UsePosition* other_pos = other->first_pos();
344  if (other_pos == NULL) return true;
345  return pos->pos().Value() < other_pos->pos().Value();
346  }
347  return start.Value() < other_start.Value();
348 }
349 
350 
351 void LiveRange::ShortenTo(LifetimePosition start) {
352  RegisterAllocator::TraceAlloc("Shorten live range %d to [%d\n", id_,
353  start.Value());
354  DCHECK(first_interval_ != NULL);
355  DCHECK(first_interval_->start().Value() <= start.Value());
356  DCHECK(start.Value() < first_interval_->end().Value());
357  first_interval_->set_start(start);
358 }
359 
360 
361 void LiveRange::EnsureInterval(LifetimePosition start, LifetimePosition end,
362  Zone* zone) {
363  RegisterAllocator::TraceAlloc("Ensure live range %d in interval [%d %d[\n",
364  id_, start.Value(), end.Value());
365  LifetimePosition new_end = end;
366  while (first_interval_ != NULL &&
367  first_interval_->start().Value() <= end.Value()) {
368  if (first_interval_->end().Value() > end.Value()) {
369  new_end = first_interval_->end();
370  }
371  first_interval_ = first_interval_->next();
372  }
373 
374  UseInterval* new_interval = new (zone) UseInterval(start, new_end);
375  new_interval->next_ = first_interval_;
376  first_interval_ = new_interval;
377  if (new_interval->next() == NULL) {
378  last_interval_ = new_interval;
379  }
380 }
381 
382 
383 void LiveRange::AddUseInterval(LifetimePosition start, LifetimePosition end,
384  Zone* zone) {
385  RegisterAllocator::TraceAlloc("Add to live range %d interval [%d %d[\n", id_,
386  start.Value(), end.Value());
387  if (first_interval_ == NULL) {
388  UseInterval* interval = new (zone) UseInterval(start, end);
389  first_interval_ = interval;
390  last_interval_ = interval;
391  } else {
392  if (end.Value() == first_interval_->start().Value()) {
393  first_interval_->set_start(start);
394  } else if (end.Value() < first_interval_->start().Value()) {
395  UseInterval* interval = new (zone) UseInterval(start, end);
396  interval->set_next(first_interval_);
397  first_interval_ = interval;
398  } else {
399  // Order of instruction's processing (see ProcessInstructions) guarantees
400  // that each new use interval either precedes or intersects with
401  // last added interval.
402  DCHECK(start.Value() < first_interval_->end().Value());
403  first_interval_->start_ = Min(start, first_interval_->start_);
404  first_interval_->end_ = Max(end, first_interval_->end_);
405  }
406  }
407 }
408 
409 
410 void LiveRange::AddUsePosition(LifetimePosition pos,
411  InstructionOperand* operand,
412  InstructionOperand* hint, Zone* zone) {
413  RegisterAllocator::TraceAlloc("Add to live range %d use position %d\n", id_,
414  pos.Value());
415  UsePosition* use_pos = new (zone) UsePosition(pos, operand, hint);
416  UsePosition* prev_hint = NULL;
417  UsePosition* prev = NULL;
418  UsePosition* current = first_pos_;
419  while (current != NULL && current->pos().Value() < pos.Value()) {
420  prev_hint = current->HasHint() ? current : prev_hint;
421  prev = current;
422  current = current->next();
423  }
424 
425  if (prev == NULL) {
426  use_pos->set_next(first_pos_);
427  first_pos_ = use_pos;
428  } else {
429  use_pos->next_ = prev->next_;
430  prev->next_ = use_pos;
431  }
432 
433  if (prev_hint == NULL && use_pos->HasHint()) {
434  current_hint_operand_ = hint;
435  }
436 }
437 
438 
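// Rewrites the operands of all recorded uses to the operand this range is
// currently assigned to (see CreateAssignedOperand).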
439 void LiveRange::ConvertOperands(Zone* zone) {
440  InstructionOperand* op = CreateAssignedOperand(zone);
441  UsePosition* use_pos = first_pos();
442  while (use_pos != NULL) {
443  DCHECK(Start().Value() <= use_pos->pos().Value() &&
444  use_pos->pos().Value() <= End().Value());
445 
446  if (use_pos->HasOperand()) {
447  DCHECK(op->IsRegister() || op->IsDoubleRegister() ||
448  !use_pos->RequiresRegister());
449  use_pos->operand()->ConvertTo(op->kind(), op->index());
450  }
451  use_pos = use_pos->next();
452  }
453 }
454 
455 
456 bool LiveRange::CanCover(LifetimePosition position) const {
457  if (IsEmpty()) return false;
458  return Start().Value() <= position.Value() &&
459  position.Value() < End().Value();
460 }
461 
462 
463 bool LiveRange::Covers(LifetimePosition position) const {
464  if (!CanCover(position)) return false;
465  UseInterval* start_search = FirstSearchIntervalForPosition(position);
466  for (UseInterval* interval = start_search; interval != NULL;
467  interval = interval->next()) {
468  DCHECK(interval->next() == NULL ||
469  interval->next()->start().Value() >= interval->start().Value());
470  AdvanceLastProcessedMarker(interval, position);
471  if (interval->Contains(position)) return true;
472  if (interval->start().Value() > position.Value()) return false;
473  }
474  return false;
475 }
476 
477 
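// Returns the first position at which this range and |other| overlap, or an
// invalid position if their use intervals are disjoint.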
478 LifetimePosition LiveRange::FirstIntersection(LiveRange* other) {
479  UseInterval* b = other->first_interval();
480  if (b == NULL) return LifetimePosition::Invalid();
481  LifetimePosition advance_last_processed_up_to = b->start();
482  UseInterval* a = FirstSearchIntervalForPosition(b->start());
483  while (a != NULL && b != NULL) {
484  if (a->start().Value() > other->End().Value()) break;
485  if (b->start().Value() > End().Value()) break;
486  LifetimePosition cur_intersection = a->Intersect(b);
487  if (cur_intersection.IsValid()) {
488  return cur_intersection;
489  }
490  if (a->start().Value() < b->start().Value()) {
491  a = a->next();
492  if (a == NULL || a->start().Value() > other->End().Value()) break;
493  AdvanceLastProcessedMarker(a, advance_last_processed_up_to);
494  } else {
495  b = b->next();
496  }
497  }
498  return LifetimePosition::Invalid();
499 }
500 
501 
502 RegisterAllocator::RegisterAllocator(InstructionSequence* code)
503  : zone_(code->isolate()),
504  code_(code),
505  live_in_sets_(code->BasicBlockCount(), zone()),
506  live_ranges_(code->VirtualRegisterCount() * 2, zone()),
507  fixed_live_ranges_(NULL),
508  fixed_double_live_ranges_(NULL),
509  unhandled_live_ranges_(code->VirtualRegisterCount() * 2, zone()),
510  active_live_ranges_(8, zone()),
511  inactive_live_ranges_(8, zone()),
512  reusable_slots_(8, zone()),
513  mode_(UNALLOCATED_REGISTERS),
514  num_registers_(-1),
515  allocation_ok_(true) {}
516 
517 
518 void RegisterAllocator::InitializeLivenessAnalysis() {
519  // Initialize the live_in sets for each block to NULL.
520  int block_count = code()->BasicBlockCount();
521  live_in_sets_.Initialize(block_count, zone());
522  live_in_sets_.AddBlock(NULL, block_count, zone());
523 }
524 
525 
526 BitVector* RegisterAllocator::ComputeLiveOut(BasicBlock* block) {
527  // Compute live out for the given block, except not including backward
528  // successor edges.
529  BitVector* live_out =
530  new (zone()) BitVector(code()->VirtualRegisterCount(), zone());
531 
532  // Process all successor blocks.
533  BasicBlock::Successors successors = block->successors();
534  for (BasicBlock::Successors::iterator i = successors.begin();
535  i != successors.end(); ++i) {
536  // Add values live on entry to the successor. Note the successor's
537  // live_in will not be computed yet for backwards edges.
538  BasicBlock* successor = *i;
539  BitVector* live_in = live_in_sets_[successor->rpo_number_];
540  if (live_in != NULL) live_out->Union(*live_in);
541 
542  // All phi input operands corresponding to this successor edge are live
543  // out from this block.
544  int index = successor->PredecessorIndexOf(block);
545  DCHECK(index >= 0);
546  DCHECK(index < static_cast<int>(successor->PredecessorCount()));
547  for (BasicBlock::const_iterator j = successor->begin();
548  j != successor->end(); ++j) {
549  Node* phi = *j;
550  if (phi->opcode() != IrOpcode::kPhi) continue;
551  Node* input = phi->InputAt(index);
552  live_out->Add(input->id());
553  }
554  }
555 
556  return live_out;
557 }
558 
559 
560 void RegisterAllocator::AddInitialIntervals(BasicBlock* block,
561  BitVector* live_out) {
562  // Add an interval that includes the entire block to the live range for
563  // each live_out value.
564  LifetimePosition start =
565  LifetimePosition::FromInstructionIndex(block->first_instruction_index());
566  LifetimePosition end = LifetimePosition::FromInstructionIndex(
567  block->last_instruction_index()).NextInstruction();
568  BitVector::Iterator iterator(live_out);
569  while (!iterator.Done()) {
570  int operand_index = iterator.Current();
571  LiveRange* range = LiveRangeFor(operand_index);
572  range->AddUseInterval(start, end, zone());
573  iterator.Advance();
574  }
575 }
576 
577 
578 int RegisterAllocator::FixedDoubleLiveRangeID(int index) {
579  return -index - 1 - Register::kMaxNumAllocatableRegisters;
580 }
581 
582 
583 InstructionOperand* RegisterAllocator::AllocateFixed(
584  UnallocatedOperand* operand, int pos, bool is_tagged) {
585  TraceAlloc("Allocating fixed reg for op %d\n", operand->virtual_register());
586  DCHECK(operand->HasFixedPolicy());
587  if (operand->HasFixedSlotPolicy()) {
588  operand->ConvertTo(InstructionOperand::STACK_SLOT,
589  operand->fixed_slot_index());
590  } else if (operand->HasFixedRegisterPolicy()) {
591  int reg_index = operand->fixed_register_index();
592  operand->ConvertTo(InstructionOperand::REGISTER, reg_index);
593  } else if (operand->HasFixedDoubleRegisterPolicy()) {
594  int reg_index = operand->fixed_register_index();
595  operand->ConvertTo(InstructionOperand::DOUBLE_REGISTER, reg_index);
596  } else {
597  UNREACHABLE();
598  }
599  if (is_tagged) {
600  TraceAlloc("Fixed reg is tagged at %d\n", pos);
601  Instruction* instr = InstructionAt(pos);
602  if (instr->HasPointerMap()) {
603  instr->pointer_map()->RecordPointer(operand, code_zone());
604  }
605  }
606  return operand;
607 }
608 
609 
610 LiveRange* RegisterAllocator::FixedLiveRangeFor(int index) {
611  DCHECK(index < Register::kMaxNumAllocatableRegisters);
612  LiveRange* result = fixed_live_ranges_[index];
613  if (result == NULL) {
614  // TODO(titzer): add a utility method to allocate a new LiveRange:
615  // The LiveRange object itself can go in this zone, but the
616  // InstructionOperand needs
617  // to go in the code zone, since it may survive register allocation.
618  result = new (zone()) LiveRange(FixedLiveRangeID(index), code_zone());
619  DCHECK(result->IsFixed());
620  result->kind_ = GENERAL_REGISTERS;
621  SetLiveRangeAssignedRegister(result, index);
622  fixed_live_ranges_[index] = result;
623  }
624  return result;
625 }
626 
627 
628 LiveRange* RegisterAllocator::FixedDoubleLiveRangeFor(int index) {
629  DCHECK(index < DoubleRegister::NumAllocatableAliasedRegisters());
630  LiveRange* result = fixed_double_live_ranges_[index];
631  if (result == NULL) {
632  result = new (zone()) LiveRange(FixedDoubleLiveRangeID(index), code_zone());
633  DCHECK(result->IsFixed());
634  result->kind_ = DOUBLE_REGISTERS;
635  SetLiveRangeAssignedRegister(result, index);
636  fixed_double_live_ranges_[index] = result;
637  }
638  return result;
639 }
640 
641 
642 LiveRange* RegisterAllocator::LiveRangeFor(int index) {
643  if (index >= live_ranges_.length()) {
644  live_ranges_.AddBlock(NULL, index - live_ranges_.length() + 1, zone());
645  }
646  LiveRange* result = live_ranges_[index];
647  if (result == NULL) {
648  result = new (zone()) LiveRange(index, code_zone());
649  live_ranges_[index] = result;
650  }
651  return result;
652 }
653 
654 
655 GapInstruction* RegisterAllocator::GetLastGap(BasicBlock* block) {
656  int last_instruction = block->last_instruction_index();
657  return code()->GapAt(last_instruction - 1);
658 }
659 
660 
661 LiveRange* RegisterAllocator::LiveRangeFor(InstructionOperand* operand) {
662  if (operand->IsUnallocated()) {
663  return LiveRangeFor(UnallocatedOperand::cast(operand)->virtual_register());
664  } else if (operand->IsRegister()) {
665  return FixedLiveRangeFor(operand->index());
666  } else if (operand->IsDoubleRegister()) {
667  return FixedDoubleLiveRangeFor(operand->index());
668  } else {
669  return NULL;
670  }
671 }
672 
673 
674 void RegisterAllocator::Define(LifetimePosition position,
675  InstructionOperand* operand,
676  InstructionOperand* hint) {
677  LiveRange* range = LiveRangeFor(operand);
678  if (range == NULL) return;
679 
680  if (range->IsEmpty() || range->Start().Value() > position.Value()) {
681  // Can happen if there is a definition without use.
682  range->AddUseInterval(position, position.NextInstruction(), zone());
683  range->AddUsePosition(position.NextInstruction(), NULL, NULL, zone());
684  } else {
685  range->ShortenTo(position);
686  }
687 
688  if (operand->IsUnallocated()) {
689  UnallocatedOperand* unalloc_operand = UnallocatedOperand::cast(operand);
690  range->AddUsePosition(position, unalloc_operand, hint, zone());
691  }
692 }
693 
694 
695 void RegisterAllocator::Use(LifetimePosition block_start,
696  LifetimePosition position,
697  InstructionOperand* operand,
698  InstructionOperand* hint) {
699  LiveRange* range = LiveRangeFor(operand);
700  if (range == NULL) return;
701  if (operand->IsUnallocated()) {
702  UnallocatedOperand* unalloc_operand = UnallocatedOperand::cast(operand);
703  range->AddUsePosition(position, unalloc_operand, hint, zone());
704  }
705  range->AddUseInterval(block_start, position, zone());
706 }
707 
708 
709 void RegisterAllocator::AddConstraintsGapMove(int index,
710  InstructionOperand* from,
711  InstructionOperand* to) {
712  GapInstruction* gap = code()->GapAt(index);
713  ParallelMove* move =
714  gap->GetOrCreateParallelMove(GapInstruction::START, code_zone());
715  if (from->IsUnallocated()) {
716  const ZoneList<MoveOperands>* move_operands = move->move_operands();
717  for (int i = 0; i < move_operands->length(); ++i) {
718  MoveOperands cur = move_operands->at(i);
719  InstructionOperand* cur_to = cur.destination();
720  if (cur_to->IsUnallocated()) {
721  if (UnallocatedOperand::cast(cur_to)->virtual_register() ==
722  UnallocatedOperand::cast(from)->virtual_register()) {
723  move->AddMove(cur.source(), to, code_zone());
724  return;
725  }
726  }
727  }
728  }
729  move->AddMove(from, to, code_zone());
730 }
731 
732 
733 void RegisterAllocator::MeetRegisterConstraints(BasicBlock* block) {
734  int start = block->first_instruction_index();
735  int end = block->last_instruction_index();
736  DCHECK_NE(-1, start);
737  for (int i = start; i <= end; ++i) {
738  if (code()->IsGapAt(i)) {
739  Instruction* instr = NULL;
740  Instruction* prev_instr = NULL;
741  if (i < end) instr = InstructionAt(i + 1);
742  if (i > start) prev_instr = InstructionAt(i - 1);
743  MeetConstraintsBetween(prev_instr, instr, i);
744  if (!AllocationOk()) return;
745  }
746  }
747 
748  // Meet register constraints for the instruction in the end.
749  if (!code()->IsGapAt(end)) {
750  MeetRegisterConstraintsForLastInstructionInBlock(block);
751  }
752 }
753 
754 
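// The last instruction of a block has no gap of its own, so moves needed for
// its fixed outputs are inserted into the first gap of each successor block;
// each such successor is expected to have exactly one predecessor.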
755 void RegisterAllocator::MeetRegisterConstraintsForLastInstructionInBlock(
756  BasicBlock* block) {
757  int end = block->last_instruction_index();
758  Instruction* last_instruction = InstructionAt(end);
759  for (size_t i = 0; i < last_instruction->OutputCount(); i++) {
760  InstructionOperand* output_operand = last_instruction->OutputAt(i);
761  DCHECK(!output_operand->IsConstant());
762  UnallocatedOperand* output = UnallocatedOperand::cast(output_operand);
763  int output_vreg = output->virtual_register();
764  LiveRange* range = LiveRangeFor(output_vreg);
765  bool assigned = false;
766  if (output->HasFixedPolicy()) {
767  AllocateFixed(output, -1, false);
768  // This value is produced on the stack, we never need to spill it.
769  if (output->IsStackSlot()) {
770  range->SetSpillOperand(output);
771  range->SetSpillStartIndex(end);
772  assigned = true;
773  }
774 
775  BasicBlock::Successors successors = block->successors();
776  for (BasicBlock::Successors::iterator succ = successors.begin();
777  succ != successors.end(); ++succ) {
778  DCHECK((*succ)->PredecessorCount() == 1);
779  int gap_index = (*succ)->first_instruction_index() + 1;
780  DCHECK(code()->IsGapAt(gap_index));
781 
782  // Create an unconstrained operand for the same virtual register
783  // and insert a gap move from the fixed output to the operand.
784  UnallocatedOperand* output_copy =
785  new (code_zone()) UnallocatedOperand(UnallocatedOperand::ANY);
786  output_copy->set_virtual_register(output_vreg);
787 
788  code()->AddGapMove(gap_index, output, output_copy);
789  }
790  }
791 
792  if (!assigned) {
793  BasicBlock::Successors successors = block->successors();
794  for (BasicBlock::Successors::iterator succ = successors.begin();
795  succ != successors.end(); ++succ) {
796  DCHECK((*succ)->PredecessorCount() == 1);
797  int gap_index = (*succ)->first_instruction_index() + 1;
798  range->SetSpillStartIndex(gap_index);
799 
800  // This move to spill operand is not a real use. Liveness analysis
801  // and splitting of live ranges do not account for it.
802  // Thus it should be inserted to a lifetime position corresponding to
803  // the instruction end.
804  GapInstruction* gap = code()->GapAt(gap_index);
805  ParallelMove* move =
806  gap->GetOrCreateParallelMove(GapInstruction::BEFORE, code_zone());
807  move->AddMove(output, range->GetSpillOperand(), code_zone());
808  }
809  }
810  }
811 }
812 
813 
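// Satisfies the operand constraints around the gap at |gap_index|: fixed
// temps and fixed/constant outputs of |first|, and fixed inputs as well as
// "output same as input" operands of |second|, inserting gap moves as needed.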
814 void RegisterAllocator::MeetConstraintsBetween(Instruction* first,
815  Instruction* second,
816  int gap_index) {
817  if (first != NULL) {
818  // Handle fixed temporaries.
819  for (size_t i = 0; i < first->TempCount(); i++) {
820  UnallocatedOperand* temp = UnallocatedOperand::cast(first->TempAt(i));
821  if (temp->HasFixedPolicy()) {
822  AllocateFixed(temp, gap_index - 1, false);
823  }
824  }
825 
826  // Handle constant/fixed output operands.
827  for (size_t i = 0; i < first->OutputCount(); i++) {
828  InstructionOperand* output = first->OutputAt(i);
829  if (output->IsConstant()) {
830  int output_vreg = output->index();
831  LiveRange* range = LiveRangeFor(output_vreg);
832  range->SetSpillStartIndex(gap_index - 1);
833  range->SetSpillOperand(output);
834  } else {
835  UnallocatedOperand* first_output = UnallocatedOperand::cast(output);
836  LiveRange* range = LiveRangeFor(first_output->virtual_register());
837  bool assigned = false;
838  if (first_output->HasFixedPolicy()) {
839  UnallocatedOperand* output_copy =
840  first_output->CopyUnconstrained(code_zone());
841  bool is_tagged = HasTaggedValue(first_output->virtual_register());
842  AllocateFixed(first_output, gap_index, is_tagged);
843 
844  // This value is produced on the stack; we never need to spill it.
845  if (first_output->IsStackSlot()) {
846  range->SetSpillOperand(first_output);
847  range->SetSpillStartIndex(gap_index - 1);
848  assigned = true;
849  }
850  code()->AddGapMove(gap_index, first_output, output_copy);
851  }
852 
853  // Make sure we add a gap move for spilling (if we have not done
854  // so already).
855  if (!assigned) {
856  range->SetSpillStartIndex(gap_index);
857 
858  // This move to spill operand is not a real use. Liveness analysis
859  // and splitting of live ranges do not account for it.
860  // Thus it should be inserted to a lifetime position corresponding to
861  // the instruction end.
862  GapInstruction* gap = code()->GapAt(gap_index);
863  ParallelMove* move =
864  gap->GetOrCreateParallelMove(GapInstruction::BEFORE, code_zone());
865  move->AddMove(first_output, range->GetSpillOperand(), code_zone());
866  }
867  }
868  }
869  }
870 
871  if (second != NULL) {
872  // Handle fixed input operands of second instruction.
873  for (size_t i = 0; i < second->InputCount(); i++) {
874  InstructionOperand* input = second->InputAt(i);
875  if (input->IsImmediate()) continue; // Ignore immediates.
876  UnallocatedOperand* cur_input = UnallocatedOperand::cast(input);
877  if (cur_input->HasFixedPolicy()) {
878  UnallocatedOperand* input_copy =
879  cur_input->CopyUnconstrained(code_zone());
880  bool is_tagged = HasTaggedValue(cur_input->virtual_register());
881  AllocateFixed(cur_input, gap_index + 1, is_tagged);
882  AddConstraintsGapMove(gap_index, input_copy, cur_input);
883  }
884  }
885 
886  // Handle "output same as input" for second instruction.
887  for (size_t i = 0; i < second->OutputCount(); i++) {
888  InstructionOperand* output = second->OutputAt(i);
889  if (!output->IsUnallocated()) continue;
890  UnallocatedOperand* second_output = UnallocatedOperand::cast(output);
891  if (second_output->HasSameAsInputPolicy()) {
892  DCHECK(i == 0); // Only valid for first output.
893  UnallocatedOperand* cur_input =
894  UnallocatedOperand::cast(second->InputAt(0));
895  int output_vreg = second_output->virtual_register();
896  int input_vreg = cur_input->virtual_register();
897 
898  UnallocatedOperand* input_copy =
899  cur_input->CopyUnconstrained(code_zone());
900  cur_input->set_virtual_register(second_output->virtual_register());
901  AddConstraintsGapMove(gap_index, input_copy, cur_input);
902 
903  if (HasTaggedValue(input_vreg) && !HasTaggedValue(output_vreg)) {
904  int index = gap_index + 1;
905  Instruction* instr = InstructionAt(index);
906  if (instr->HasPointerMap()) {
907  instr->pointer_map()->RecordPointer(input_copy, code_zone());
908  }
909  } else if (!HasTaggedValue(input_vreg) && HasTaggedValue(output_vreg)) {
910  // The input is assumed to immediately have a tagged representation,
911  // before the pointer map can be used. I.e. the pointer map at the
912  // instruction will include the output operand (whose value at the
913  // beginning of the instruction is equal to the input operand). If
914  // this is not desired, then the pointer map at this instruction needs
915  // to be adjusted manually.
916  }
917  }
918  }
919  }
920 }
921 
922 
923 bool RegisterAllocator::IsOutputRegisterOf(Instruction* instr, int index) {
924  for (size_t i = 0; i < instr->OutputCount(); i++) {
925  InstructionOperand* output = instr->OutputAt(i);
926  if (output->IsRegister() && output->index() == index) return true;
927  }
928  return false;
929 }
930 
931 
932 bool RegisterAllocator::IsOutputDoubleRegisterOf(Instruction* instr,
933  int index) {
934  for (size_t i = 0; i < instr->OutputCount(); i++) {
935  InstructionOperand* output = instr->OutputAt(i);
936  if (output->IsDoubleRegister() && output->index() == index) return true;
937  }
938  return false;
939 }
940 
941 
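// Walks the instructions of |block| in reverse order, shortening or extending
// live ranges as definitions and uses are encountered. |live| is the set of
// virtual registers live at the current point and is updated along the way.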
942 void RegisterAllocator::ProcessInstructions(BasicBlock* block,
943  BitVector* live) {
944  int block_start = block->first_instruction_index();
945 
946  LifetimePosition block_start_position =
947  LifetimePosition::FromInstructionIndex(block_start);
948 
949  for (int index = block->last_instruction_index(); index >= block_start;
950  index--) {
951  LifetimePosition curr_position =
952  LifetimePosition::FromInstructionIndex(index);
953 
954  Instruction* instr = InstructionAt(index);
955  DCHECK(instr != NULL);
956  if (instr->IsGapMoves()) {
957  // Process the moves of the gap instruction, making their sources live.
958  GapInstruction* gap = code()->GapAt(index);
959 
960  // TODO(titzer): no need to create the parallel move if it doesn't exist.
961  ParallelMove* move =
962  gap->GetOrCreateParallelMove(GapInstruction::START, code_zone());
963  const ZoneList<MoveOperands>* move_operands = move->move_operands();
964  for (int i = 0; i < move_operands->length(); ++i) {
965  MoveOperands* cur = &move_operands->at(i);
966  if (cur->IsIgnored()) continue;
967  InstructionOperand* from = cur->source();
968  InstructionOperand* to = cur->destination();
969  InstructionOperand* hint = to;
970  if (to->IsUnallocated()) {
971  int to_vreg = UnallocatedOperand::cast(to)->virtual_register();
972  LiveRange* to_range = LiveRangeFor(to_vreg);
973  if (to_range->is_phi()) {
974  if (to_range->is_non_loop_phi()) {
975  hint = to_range->current_hint_operand();
976  }
977  } else {
978  if (live->Contains(to_vreg)) {
979  Define(curr_position, to, from);
980  live->Remove(to_vreg);
981  } else {
982  cur->Eliminate();
983  continue;
984  }
985  }
986  } else {
987  Define(curr_position, to, from);
988  }
989  Use(block_start_position, curr_position, from, hint);
990  if (from->IsUnallocated()) {
991  live->Add(UnallocatedOperand::cast(from)->virtual_register());
992  }
993  }
994  } else {
995  // Process output, inputs, and temps of this non-gap instruction.
996  for (size_t i = 0; i < instr->OutputCount(); i++) {
997  InstructionOperand* output = instr->OutputAt(i);
998  if (output->IsUnallocated()) {
999  int out_vreg = UnallocatedOperand::cast(output)->virtual_register();
1000  live->Remove(out_vreg);
1001  } else if (output->IsConstant()) {
1002  int out_vreg = output->index();
1003  live->Remove(out_vreg);
1004  }
1005  Define(curr_position, output, NULL);
1006  }
1007 
1008  if (instr->ClobbersRegisters()) {
1009  for (int i = 0; i < Register::kMaxNumAllocatableRegisters; ++i) {
1010  if (!IsOutputRegisterOf(instr, i)) {
1011  LiveRange* range = FixedLiveRangeFor(i);
1012  range->AddUseInterval(curr_position, curr_position.InstructionEnd(),
1013  zone());
1014  }
1015  }
1016  }
1017 
1018  if (instr->ClobbersDoubleRegisters()) {
1019  for (int i = 0; i < DoubleRegister::NumAllocatableAliasedRegisters();
1020  ++i) {
1021  if (!IsOutputDoubleRegisterOf(instr, i)) {
1022  LiveRange* range = FixedDoubleLiveRangeFor(i);
1023  range->AddUseInterval(curr_position, curr_position.InstructionEnd(),
1024  zone());
1025  }
1026  }
1027  }
1028 
1029  for (size_t i = 0; i < instr->InputCount(); i++) {
1030  InstructionOperand* input = instr->InputAt(i);
1031  if (input->IsImmediate()) continue; // Ignore immediates.
1032  LifetimePosition use_pos;
1033  if (input->IsUnallocated() &&
1034  UnallocatedOperand::cast(input)->IsUsedAtStart()) {
1035  use_pos = curr_position;
1036  } else {
1037  use_pos = curr_position.InstructionEnd();
1038  }
1039 
1040  Use(block_start_position, use_pos, input, NULL);
1041  if (input->IsUnallocated()) {
1042  live->Add(UnallocatedOperand::cast(input)->virtual_register());
1043  }
1044  }
1045 
1046  for (size_t i = 0; i < instr->TempCount(); i++) {
1047  InstructionOperand* temp = instr->TempAt(i);
1048  if (instr->ClobbersTemps()) {
1049  if (temp->IsRegister()) continue;
1050  if (temp->IsUnallocated()) {
1051  UnallocatedOperand* temp_unalloc = UnallocatedOperand::cast(temp);
1052  if (temp_unalloc->HasFixedPolicy()) {
1053  continue;
1054  }
1055  }
1056  }
1057  Use(block_start_position, curr_position.InstructionEnd(), temp, NULL);
1058  Define(curr_position, temp, NULL);
1059  }
1060  }
1061  }
1062 }
1063 
1064 
1065 void RegisterAllocator::ResolvePhis(BasicBlock* block) {
1066  for (BasicBlock::const_iterator i = block->begin(); i != block->end(); ++i) {
1067  Node* phi = *i;
1068  if (phi->opcode() != IrOpcode::kPhi) continue;
1069 
1070  UnallocatedOperand* phi_operand =
1071  new (code_zone()) UnallocatedOperand(UnallocatedOperand::NONE);
1072  phi_operand->set_virtual_register(phi->id());
1073 
1074  int j = 0;
1075  Node::Inputs inputs = phi->inputs();
1076  for (Node::Inputs::iterator iter(inputs.begin()); iter != inputs.end();
1077  ++iter, ++j) {
1078  Node* op = *iter;
1079  // TODO(mstarzinger): Use a ValueInputIterator instead.
1080  if (j >= block->PredecessorCount()) continue;
1081  UnallocatedOperand* operand =
1082  new (code_zone()) UnallocatedOperand(UnallocatedOperand::ANY);
1083  operand->set_virtual_register(op->id());
1084  BasicBlock* cur_block = block->PredecessorAt(j);
1085  // The gap move must be added without any special processing as in
1086  // the AddConstraintsGapMove.
1087  code()->AddGapMove(cur_block->last_instruction_index() - 1, operand,
1088  phi_operand);
1089 
1090  Instruction* branch = InstructionAt(cur_block->last_instruction_index());
1091  DCHECK(!branch->HasPointerMap());
1092  USE(branch);
1093  }
1094 
1095  LiveRange* live_range = LiveRangeFor(phi->id());
1096  BlockStartInstruction* block_start = code()->GetBlockStart(block);
1097  block_start->GetOrCreateParallelMove(GapInstruction::START, code_zone())
1098  ->AddMove(phi_operand, live_range->GetSpillOperand(), code_zone());
1099  live_range->SetSpillStartIndex(block->first_instruction_index());
1100 
1101  // We use the phi-ness of some nodes in some later heuristics.
1102  live_range->set_is_phi(true);
1103  if (!block->IsLoopHeader()) {
1104  live_range->set_is_non_loop_phi(true);
1105  }
1106  }
1107 }
1108 
1109 
1110 bool RegisterAllocator::Allocate() {
1111  assigned_registers_ = new (code_zone())
1112  BitVector(Register::NumAllocatableRegisters(), code_zone());
1113  assigned_double_registers_ = new (code_zone())
1114  BitVector(DoubleRegister::NumAllocatableAliasedRegisters(), code_zone());
1115  MeetRegisterConstraints();
1116  if (!AllocationOk()) return false;
1117  ResolvePhis();
1118  BuildLiveRanges();
1119  AllocateGeneralRegisters();
1120  if (!AllocationOk()) return false;
1121  AllocateDoubleRegisters();
1122  if (!AllocationOk()) return false;
1123  PopulatePointerMaps();
1124  ConnectRanges();
1125  ResolveControlFlow();
1126  code()->frame()->SetAllocatedRegisters(assigned_registers_);
1127  code()->frame()->SetAllocatedDoubleRegisters(assigned_double_registers_);
1128  return true;
1129 }
1130 
1131 
1132 void RegisterAllocator::MeetRegisterConstraints() {
1133  RegisterAllocatorPhase phase("L_Register constraints", this);
1134  for (int i = 0; i < code()->BasicBlockCount(); ++i) {
1135  MeetRegisterConstraints(code()->BlockAt(i));
1136  if (!AllocationOk()) return;
1137  }
1138 }
1139 
1140 
1141 void RegisterAllocator::ResolvePhis() {
1142  RegisterAllocatorPhase phase("L_Resolve phis", this);
1143 
1144  // Process the blocks in reverse order.
1145  for (int i = code()->BasicBlockCount() - 1; i >= 0; --i) {
1146  ResolvePhis(code()->BlockAt(i));
1147  }
1148 }
1149 
1150 
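// For a value live across the edge |pred| -> |block|, finds the child ranges
// covering the end of |pred| and the start of |block| and, if they were
// assigned different locations, inserts a connecting gap move on the edge.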
1151 void RegisterAllocator::ResolveControlFlow(LiveRange* range, BasicBlock* block,
1152  BasicBlock* pred) {
1153  LifetimePosition pred_end =
1154  LifetimePosition::FromInstructionIndex(pred->last_instruction_index());
1155  LifetimePosition cur_start =
1156  LifetimePosition::FromInstructionIndex(block->first_instruction_index());
1157  LiveRange* pred_cover = NULL;
1158  LiveRange* cur_cover = NULL;
1159  LiveRange* cur_range = range;
1160  while (cur_range != NULL && (cur_cover == NULL || pred_cover == NULL)) {
1161  if (cur_range->CanCover(cur_start)) {
1162  DCHECK(cur_cover == NULL);
1163  cur_cover = cur_range;
1164  }
1165  if (cur_range->CanCover(pred_end)) {
1166  DCHECK(pred_cover == NULL);
1167  pred_cover = cur_range;
1168  }
1169  cur_range = cur_range->next();
1170  }
1171 
1172  if (cur_cover->IsSpilled()) return;
1173  DCHECK(pred_cover != NULL && cur_cover != NULL);
1174  if (pred_cover != cur_cover) {
1175  InstructionOperand* pred_op =
1176  pred_cover->CreateAssignedOperand(code_zone());
1177  InstructionOperand* cur_op = cur_cover->CreateAssignedOperand(code_zone());
1178  if (!pred_op->Equals(cur_op)) {
1179  GapInstruction* gap = NULL;
1180  if (block->PredecessorCount() == 1) {
1181  gap = code()->GapAt(block->first_instruction_index());
1182  } else {
1183  DCHECK(pred->SuccessorCount() == 1);
1184  gap = GetLastGap(pred);
1185 
1186  Instruction* branch = InstructionAt(pred->last_instruction_index());
1187  DCHECK(!branch->HasPointerMap());
1188  USE(branch);
1189  }
1190  gap->GetOrCreateParallelMove(GapInstruction::START, code_zone())
1191  ->AddMove(pred_op, cur_op, code_zone());
1192  }
1193  }
1194 }
1195 
1196 
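// Returns the parallel move into which a connecting move at |pos| should be
// inserted, picking the gap and inner position (START/END or BEFORE/AFTER)
// according to whether |pos| is at an instruction start.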
1197 ParallelMove* RegisterAllocator::GetConnectingParallelMove(
1198  LifetimePosition pos) {
1199  int index = pos.InstructionIndex();
1200  if (code()->IsGapAt(index)) {
1201  GapInstruction* gap = code()->GapAt(index);
1202  return gap->GetOrCreateParallelMove(
1203  pos.IsInstructionStart() ? GapInstruction::START : GapInstruction::END,
1204  code_zone());
1205  }
1206  int gap_pos = pos.IsInstructionStart() ? (index - 1) : (index + 1);
1207  return code()->GapAt(gap_pos)->GetOrCreateParallelMove(
1208  (gap_pos < index) ? GapInstruction::AFTER : GapInstruction::BEFORE,
1209  code_zone());
1210 }
1211 
1212 
1213 BasicBlock* RegisterAllocator::GetBlock(LifetimePosition pos) {
1214  return code()->GetBasicBlock(pos.InstructionIndex());
1215 }
1216 
1217 
1218 void RegisterAllocator::ConnectRanges() {
1219  RegisterAllocatorPhase phase("L_Connect ranges", this);
1220  for (int i = 0; i < live_ranges()->length(); ++i) {
1221  LiveRange* first_range = live_ranges()->at(i);
1222  if (first_range == NULL || first_range->parent() != NULL) continue;
1223 
1224  LiveRange* second_range = first_range->next();
1225  while (second_range != NULL) {
1226  LifetimePosition pos = second_range->Start();
1227 
1228  if (!second_range->IsSpilled()) {
1229  // Add gap move if the two live ranges touch and there is no block
1230  // boundary.
1231  if (first_range->End().Value() == pos.Value()) {
1232  bool should_insert = true;
1233  if (IsBlockBoundary(pos)) {
1234  should_insert = CanEagerlyResolveControlFlow(GetBlock(pos));
1235  }
1236  if (should_insert) {
1237  ParallelMove* move = GetConnectingParallelMove(pos);
1238  InstructionOperand* prev_operand =
1239  first_range->CreateAssignedOperand(code_zone());
1240  InstructionOperand* cur_operand =
1241  second_range->CreateAssignedOperand(code_zone());
1242  move->AddMove(prev_operand, cur_operand, code_zone());
1243  }
1244  }
1245  }
1246 
1247  first_range = second_range;
1248  second_range = second_range->next();
1249  }
1250  }
1251 }
1252 
1253 
1254 bool RegisterAllocator::CanEagerlyResolveControlFlow(BasicBlock* block) const {
1255  if (block->PredecessorCount() != 1) return false;
1256  return block->PredecessorAt(0)->rpo_number_ == block->rpo_number_ - 1;
1257 }
1258 
1259 
1260 void RegisterAllocator::ResolveControlFlow() {
1261  RegisterAllocatorPhase phase("L_Resolve control flow", this);
1262  for (int block_id = 1; block_id < code()->BasicBlockCount(); ++block_id) {
1263  BasicBlock* block = code()->BlockAt(block_id);
1264  if (CanEagerlyResolveControlFlow(block)) continue;
1265  BitVector* live = live_in_sets_[block->rpo_number_];
1266  BitVector::Iterator iterator(live);
1267  while (!iterator.Done()) {
1268  int operand_index = iterator.Current();
1269  BasicBlock::Predecessors predecessors = block->predecessors();
1270  for (BasicBlock::Predecessors::iterator i = predecessors.begin();
1271  i != predecessors.end(); ++i) {
1272  BasicBlock* cur = *i;
1273  LiveRange* cur_range = LiveRangeFor(operand_index);
1274  ResolveControlFlow(cur_range, block, cur);
1275  }
1276  iterator.Advance();
1277  }
1278  }
1279 }
1280 
1281 
1282 void RegisterAllocator::BuildLiveRanges() {
1283  RegisterAllocatorPhase phase("L_Build live ranges", this);
1284  InitializeLivenessAnalysis();
1285  // Process the blocks in reverse order.
1286  for (int block_id = code()->BasicBlockCount() - 1; block_id >= 0;
1287  --block_id) {
1288  BasicBlock* block = code()->BlockAt(block_id);
1289  BitVector* live = ComputeLiveOut(block);
1290  // Initially consider all live_out values live for the entire block. We
1291  // will shorten these intervals if necessary.
1292  AddInitialIntervals(block, live);
1293 
1294  // Process the instructions in reverse order, generating and killing
1295  // live values.
1296  ProcessInstructions(block, live);
1297  // All phi output operands are killed by this block.
1298  for (BasicBlock::const_iterator i = block->begin(); i != block->end();
1299  ++i) {
1300  Node* phi = *i;
1301  if (phi->opcode() != IrOpcode::kPhi) continue;
1302 
1303  // The live range interval already ends at the first instruction of the
1304  // block.
1305  live->Remove(phi->id());
1306 
1307  InstructionOperand* hint = NULL;
1308  InstructionOperand* phi_operand = NULL;
1309  GapInstruction* gap = GetLastGap(block->PredecessorAt(0));
1310 
1311  // TODO(titzer): no need to create the parallel move if it doesn't exist.
1312  ParallelMove* move =
1313  gap->GetOrCreateParallelMove(GapInstruction::START, code_zone());
1314  for (int j = 0; j < move->move_operands()->length(); ++j) {
1315  InstructionOperand* to = move->move_operands()->at(j).destination();
1316  if (to->IsUnallocated() &&
1317  UnallocatedOperand::cast(to)->virtual_register() == phi->id()) {
1318  hint = move->move_operands()->at(j).source();
1319  phi_operand = to;
1320  break;
1321  }
1322  }
1323  DCHECK(hint != NULL);
1324 
1325  LifetimePosition block_start = LifetimePosition::FromInstructionIndex(
1326  block->first_instruction_index());
1327  Define(block_start, phi_operand, hint);
1328  }
1329 
1330  // Now live is live_in for this block except not including values live
1331  // out on backward successor edges.
1332  live_in_sets_[block_id] = live;
1333 
1334  if (block->IsLoopHeader()) {
1335  // Add a live range stretching from the first loop instruction to the last
1336  // for each value live on entry to the header.
1337  BitVector::Iterator iterator(live);
1338  LifetimePosition start = LifetimePosition::FromInstructionIndex(
1339  block->first_instruction_index());
1340  int end_index =
1341  code()->BlockAt(block->loop_end_)->last_instruction_index();
1342  LifetimePosition end =
1343  LifetimePosition::FromInstructionIndex(end_index).NextInstruction();
1344  while (!iterator.Done()) {
1345  int operand_index = iterator.Current();
1346  LiveRange* range = LiveRangeFor(operand_index);
1347  range->EnsureInterval(start, end, zone());
1348  iterator.Advance();
1349  }
1350 
1351  // Insert all values into the live in sets of all blocks in the loop.
1352  for (int i = block->rpo_number_ + 1; i < block->loop_end_; ++i) {
1353  live_in_sets_[i]->Union(*live);
1354  }
1355  }
1356 
1357 #ifdef DEBUG
1358  if (block_id == 0) {
1359  BitVector::Iterator iterator(live);
1360  bool found = false;
1361  while (!iterator.Done()) {
1362  found = true;
1363  int operand_index = iterator.Current();
1364  PrintF("Register allocator error: live v%d reached first block.\n",
1365  operand_index);
1366  LiveRange* range = LiveRangeFor(operand_index);
1367  PrintF(" (first use is at %d)\n", range->first_pos()->pos().Value());
1368  CompilationInfo* info = code()->linkage()->info();
1369  if (info->IsStub()) {
1370  if (info->code_stub() == NULL) {
1371  PrintF("\n");
1372  } else {
1373  CodeStub::Major major_key = info->code_stub()->MajorKey();
1374  PrintF(" (function: %s)\n", CodeStub::MajorName(major_key, false));
1375  }
1376  } else {
1377  DCHECK(info->IsOptimizing());
1378  AllowHandleDereference allow_deref;
1379  PrintF(" (function: %s)\n",
1380  info->function()->debug_name()->ToCString().get());
1381  }
1382  iterator.Advance();
1383  }
1384  DCHECK(!found);
1385  }
1386 #endif
1387  }
1388 
1389  for (int i = 0; i < live_ranges_.length(); ++i) {
1390  if (live_ranges_[i] != NULL) {
1391  live_ranges_[i]->kind_ = RequiredRegisterKind(live_ranges_[i]->id());
1392 
1393  // TODO(bmeurer): This is a horrible hack to make sure that for constant
1394  // live ranges, every use requires the constant to be in a register.
1395  // Without this hack, all uses with "any" policy would get the constant
1396  // operand assigned.
1397  LiveRange* range = live_ranges_[i];
1398  if (range->HasAllocatedSpillOperand() &&
1399  range->GetSpillOperand()->IsConstant()) {
1400  for (UsePosition* pos = range->first_pos(); pos != NULL;
1401  pos = pos->next_) {
1402  pos->register_beneficial_ = true;
1403  pos->requires_reg_ = true;
1404  }
1405  }
1406  }
1407  }
1408 }
1409 
1410 
1411 bool RegisterAllocator::SafePointsAreInOrder() const {
1412  int safe_point = 0;
1413  const PointerMapDeque* pointer_maps = code()->pointer_maps();
1414  for (PointerMapDeque::const_iterator it = pointer_maps->begin();
1415  it != pointer_maps->end(); ++it) {
1416  PointerMap* map = *it;
1417  if (safe_point > map->instruction_position()) return false;
1418  safe_point = map->instruction_position();
1419  }
1420  return true;
1421 }
1422 
1423 
1424 void RegisterAllocator::PopulatePointerMaps() {
1425  RegisterAllocatorPhase phase("L_Populate pointer maps", this);
1426 
1427  DCHECK(SafePointsAreInOrder());
1428 
1429  // Iterate over all safe point positions and record a pointer
1430  // for all spilled live ranges at this point.
1431  int last_range_start = 0;
1432  const PointerMapDeque* pointer_maps = code()->pointer_maps();
1433  PointerMapDeque::const_iterator first_it = pointer_maps->begin();
1434  for (int range_idx = 0; range_idx < live_ranges()->length(); ++range_idx) {
1435  LiveRange* range = live_ranges()->at(range_idx);
1436  if (range == NULL) continue;
1437  // Iterate over the first parts of multi-part live ranges.
1438  if (range->parent() != NULL) continue;
1439  // Skip non-reference values.
1440  if (!HasTaggedValue(range->id())) continue;
1441  // Skip empty live ranges.
1442  if (range->IsEmpty()) continue;
1443 
1444  // Find the extent of the range and its children.
1445  int start = range->Start().InstructionIndex();
1446  int end = 0;
1447  for (LiveRange* cur = range; cur != NULL; cur = cur->next()) {
1448  LifetimePosition this_end = cur->End();
1449  if (this_end.InstructionIndex() > end) end = this_end.InstructionIndex();
1450  DCHECK(cur->Start().InstructionIndex() >= start);
1451  }
1452 
1453  // Most of the ranges are in order, but not all. Keep an eye on when they
1454  // step backwards and reset the first_it so we don't miss any safe points.
1455  if (start < last_range_start) first_it = pointer_maps->begin();
1456  last_range_start = start;
1457 
1458  // Step across all the safe points that are before the start of this range,
1459  // recording how far we step in order to save doing this for the next range.
1460  for (; first_it != pointer_maps->end(); ++first_it) {
1461  PointerMap* map = *first_it;
1462  if (map->instruction_position() >= start) break;
1463  }
1464 
1465  // Step through the safe points to see whether they are in the range.
1466  for (PointerMapDeque::const_iterator it = first_it;
1467  it != pointer_maps->end(); ++it) {
1468  PointerMap* map = *it;
1469  int safe_point = map->instruction_position();
1470 
1471  // The safe points are sorted so we can stop searching here.
1472  if (safe_point - 1 > end) break;
1473 
1474  // Advance to the next active range that covers the current
1475  // safe point position.
1476  LifetimePosition safe_point_pos =
1477  LifetimePosition::FromInstructionIndex(safe_point);
1478  LiveRange* cur = range;
1479  while (cur != NULL && !cur->Covers(safe_point_pos)) {
1480  cur = cur->next();
1481  }
1482  if (cur == NULL) continue;
1483 
1484  // Check if the live range is spilled and the safe point is after
1485  // the spill position.
1486  if (range->HasAllocatedSpillOperand() &&
1487  safe_point >= range->spill_start_index() &&
1488  !range->GetSpillOperand()->IsConstant()) {
1489  TraceAlloc("Pointer for range %d (spilled at %d) at safe point %d\n",
1490  range->id(), range->spill_start_index(), safe_point);
1491  map->RecordPointer(range->GetSpillOperand(), code_zone());
1492  }
1493 
1494  if (!cur->IsSpilled()) {
1495  TraceAlloc(
1496  "Pointer in register for range %d (start at %d) "
1497  "at safe point %d\n",
1498  cur->id(), cur->Start().Value(), safe_point);
1499  InstructionOperand* operand = cur->CreateAssignedOperand(code_zone());
1500  DCHECK(!operand->IsStackSlot());
1501  map->RecordPointer(operand, code_zone());
1502  }
1503  }
1504  }
1505 }
1506 
1507 
1508 void RegisterAllocator::AllocateGeneralRegisters() {
1509  RegisterAllocatorPhase phase("L_Allocate general registers", this);
1510  num_registers_ = Register::NumAllocatableRegisters();
1511  mode_ = GENERAL_REGISTERS;
1512  AllocateRegisters();
1513 }
1514 
1515 
1516 void RegisterAllocator::AllocateDoubleRegisters() {
1517  RegisterAllocatorPhase phase("L_Allocate double registers", this);
1518  num_registers_ = DoubleRegister::NumAllocatableAliasedRegisters();
1519  mode_ = DOUBLE_REGISTERS;
1520  AllocateRegisters();
1521 }
1522 
1523 
1524 void RegisterAllocator::AllocateRegisters() {
1525  DCHECK(unhandled_live_ranges_.is_empty());
1526 
1527  for (int i = 0; i < live_ranges_.length(); ++i) {
1528  if (live_ranges_[i] != NULL) {
1529  if (live_ranges_[i]->Kind() == mode_) {
1530  AddToUnhandledUnsorted(live_ranges_[i]);
1531  }
1532  }
1533  }
1534  SortUnhandled();
1535  DCHECK(UnhandledIsSorted());
1536 
1537  DCHECK(reusable_slots_.is_empty());
1538  DCHECK(active_live_ranges_.is_empty());
1539  DCHECK(inactive_live_ranges_.is_empty());
1540 
1541  if (mode_ == DOUBLE_REGISTERS) {
1542  for (int i = 0; i < DoubleRegister::NumAllocatableAliasedRegisters(); ++i) {
1543  LiveRange* current = fixed_double_live_ranges_.at(i);
1544  if (current != NULL) {
1545  AddToInactive(current);
1546  }
1547  }
1548  } else {
1549  DCHECK(mode_ == GENERAL_REGISTERS);
1550  for (int i = 0; i < fixed_live_ranges_.length(); ++i) {
1551  LiveRange* current = fixed_live_ranges_.at(i);
1552  if (current != NULL) {
1553  AddToInactive(current);
1554  }
1555  }
1556  }
1557 
1558  while (!unhandled_live_ranges_.is_empty()) {
1559  DCHECK(UnhandledIsSorted());
1560  LiveRange* current = unhandled_live_ranges_.RemoveLast();
1561  DCHECK(UnhandledIsSorted());
1562  LifetimePosition position = current->Start();
1563 #ifdef DEBUG
1564  allocation_finger_ = position;
1565 #endif
1566  TraceAlloc("Processing interval %d start=%d\n", current->id(),
1567  position.Value());
1568 
1569  if (current->HasAllocatedSpillOperand()) {
1570  TraceAlloc("Live range %d already has a spill operand\n", current->id());
1571  LifetimePosition next_pos = position;
1572  if (code()->IsGapAt(next_pos.InstructionIndex())) {
1573  next_pos = next_pos.NextInstruction();
1574  }
1575  UsePosition* pos = current->NextUsePositionRegisterIsBeneficial(next_pos);
1576  // If the range already has a spill operand and it doesn't need a
1577  // register immediately, split it and spill the first part of the range.
1578  if (pos == NULL) {
1579  Spill(current);
1580  continue;
1581  } else if (pos->pos().Value() >
1582  current->Start().NextInstruction().Value()) {
1583  // Do not spill the live range eagerly if the use position that can
1584  // benefit from the register is too close to the start of the live range.
1585  SpillBetween(current, current->Start(), pos->pos());
1586  if (!AllocationOk()) return;
1587  DCHECK(UnhandledIsSorted());
1588  continue;
1589  }
1590  }
1591 
1592  for (int i = 0; i < active_live_ranges_.length(); ++i) {
1593  LiveRange* cur_active = active_live_ranges_.at(i);
1594  if (cur_active->End().Value() <= position.Value()) {
1595  ActiveToHandled(cur_active);
1596  --i; // The live range was removed from the list of active live ranges.
1597  } else if (!cur_active->Covers(position)) {
1598  ActiveToInactive(cur_active);
1599  --i; // The live range was removed from the list of active live ranges.
1600  }
1601  }
1602 
1603  for (int i = 0; i < inactive_live_ranges_.length(); ++i) {
1604  LiveRange* cur_inactive = inactive_live_ranges_.at(i);
1605  if (cur_inactive->End().Value() <= position.Value()) {
1606  InactiveToHandled(cur_inactive);
1607  --i; // Live range was removed from the list of inactive live ranges.
1608  } else if (cur_inactive->Covers(position)) {
1609  InactiveToActive(cur_inactive);
1610  --i; // Live range was removed from the list of inactive live ranges.
1611  }
1612  }
1613 
1614  DCHECK(!current->HasRegisterAssigned() && !current->IsSpilled());
1615 
1616  bool result = TryAllocateFreeReg(current);
1617  if (!AllocationOk()) return;
1618 
1619  if (!result) AllocateBlockedReg(current);
1620  if (!AllocationOk()) return;
1621 
1622  if (current->HasRegisterAssigned()) {
1623  AddToActive(current);
1624  }
1625  }
1626 
1627  reusable_slots_.Rewind(0);
1628  active_live_ranges_.Rewind(0);
1629  inactive_live_ranges_.Rewind(0);
1630 }
1631 
1632 
1633 const char* RegisterAllocator::RegisterName(int allocation_index) {
1634  if (mode_ == GENERAL_REGISTERS) {
1635  return Register::AllocationIndexToString(allocation_index);
1636  } else {
1637  return DoubleRegister::AllocationIndexToString(allocation_index);
1638  }
1639 }
1640 
1641 
1642 void RegisterAllocator::TraceAlloc(const char* msg, ...) {
1643  if (FLAG_trace_alloc) {
1644  va_list arguments;
1645  va_start(arguments, msg);
1646  base::OS::VPrint(msg, arguments);
1647  va_end(arguments);
1648  }
1649 }
1650 
1651 
1652 bool RegisterAllocator::HasTaggedValue(int virtual_register) const {
1653  return code()->IsReference(virtual_register);
1654 }
1655 
1656 
1657 RegisterKind RegisterAllocator::RequiredRegisterKind(
1658  int virtual_register) const {
1659  return (code()->IsDouble(virtual_register)) ? DOUBLE_REGISTERS
1660  : GENERAL_REGISTERS;
1661 }
1662 
1663 
1664 void RegisterAllocator::AddToActive(LiveRange* range) {
1665  TraceAlloc("Add live range %d to active\n", range->id());
1666  active_live_ranges_.Add(range, zone());
1667 }
1668 
1669 
1670 void RegisterAllocator::AddToInactive(LiveRange* range) {
1671  TraceAlloc("Add live range %d to inactive\n", range->id());
1672  inactive_live_ranges_.Add(range, zone());
1673 }
1674 
1675 
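// Inserts |range| into the unhandled list while keeping the list sorted, so
// that the range to be processed next remains at the end of the list.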
1676 void RegisterAllocator::AddToUnhandledSorted(LiveRange* range) {
1677  if (range == NULL || range->IsEmpty()) return;
1678  DCHECK(!range->HasRegisterAssigned() && !range->IsSpilled());
1679  DCHECK(allocation_finger_.Value() <= range->Start().Value());
1680  for (int i = unhandled_live_ranges_.length() - 1; i >= 0; --i) {
1681  LiveRange* cur_range = unhandled_live_ranges_.at(i);
1682  if (range->ShouldBeAllocatedBefore(cur_range)) {
1683  TraceAlloc("Add live range %d to unhandled at %d\n", range->id(), i + 1);
1684  unhandled_live_ranges_.InsertAt(i + 1, range, zone());
1685  DCHECK(UnhandledIsSorted());
1686  return;
1687  }
1688  }
1689  TraceAlloc("Add live range %d to unhandled at start\n", range->id());
1690  unhandled_live_ranges_.InsertAt(0, range, zone());
1691  DCHECK(UnhandledIsSorted());
1692 }
1693 
1694 
1695 void RegisterAllocator::AddToUnhandledUnsorted(LiveRange* range) {
1696  if (range == NULL || range->IsEmpty()) return;
1697  DCHECK(!range->HasRegisterAssigned() && !range->IsSpilled());
1698  TraceAlloc("Add live range %d to unhandled unsorted at end\n", range->id());
1699  unhandled_live_ranges_.Add(range, zone());
1700 }
1701 
1702 
1703 static int UnhandledSortHelper(LiveRange* const* a, LiveRange* const* b) {
1704  DCHECK(!(*a)->ShouldBeAllocatedBefore(*b) ||
1705  !(*b)->ShouldBeAllocatedBefore(*a));
1706  if ((*a)->ShouldBeAllocatedBefore(*b)) return 1;
1707  if ((*b)->ShouldBeAllocatedBefore(*a)) return -1;
1708  return (*a)->id() - (*b)->id();
1709 }
1710 
1711 
1712 // Sort the unhandled live ranges so that the ranges to be processed first are
1713 // at the end of the array list. This is convenient for the register allocation
1714 // algorithm because it is efficient to remove elements from the end.
1715 void RegisterAllocator::SortUnhandled() {
1716  TraceAlloc("Sort unhandled\n");
1717  unhandled_live_ranges_.Sort(&UnhandledSortHelper);
1718 }
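
The comparator is deliberately inverted: it returns a positive value when *a should be allocated before *b, which pushes a towards the end of the array, so the allocator can fetch the next range with a cheap removal from the back. A hedged sketch of the same idea with plain integer start positions:

#include <algorithm>
#include <vector>

// Sketch only: keep unhandled start positions sorted in descending order so
// the range that must be processed next sits at the back of the vector.
int TakeNextUnhandled(std::vector<int>* starts) {
  std::sort(starts->begin(), starts->end(),
            [](int a, int b) { return a > b; });
  int next = starts->back();  // smallest start position, i.e. next to process
  starts->pop_back();         // O(1) removal from the end
  return next;
}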
1719 
1720 
1721 bool RegisterAllocator::UnhandledIsSorted() {
1722  int len = unhandled_live_ranges_.length();
1723  for (int i = 1; i < len; i++) {
1724  LiveRange* a = unhandled_live_ranges_.at(i - 1);
1725  LiveRange* b = unhandled_live_ranges_.at(i);
1726  if (a->Start().Value() < b->Start().Value()) return false;
1727  }
1728  return true;
1729 }
1730 
1731 
1732 void RegisterAllocator::FreeSpillSlot(LiveRange* range) {
1733  // Check that we are the last range.
1734  if (range->next() != NULL) return;
1735 
1736  if (!range->TopLevel()->HasAllocatedSpillOperand()) return;
1737 
1738  InstructionOperand* spill_operand = range->TopLevel()->GetSpillOperand();
1739  if (spill_operand->IsConstant()) return;
1740  if (spill_operand->index() >= 0) {
1741  reusable_slots_.Add(range, zone());
1742  }
1743 }
1744 
1745 
1746 InstructionOperand* RegisterAllocator::TryReuseSpillSlot(LiveRange* range) {
1747  if (reusable_slots_.is_empty()) return NULL;
1748  if (reusable_slots_.first()->End().Value() >
1749  range->TopLevel()->Start().Value()) {
1750  return NULL;
1751  }
1752  InstructionOperand* result =
1753  reusable_slots_.first()->TopLevel()->GetSpillOperand();
1754  reusable_slots_.Remove(0);
1755  return result;
1756 }
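
Only the first queued slot is ever considered: it can be handed out again when the range that last used it has already ended by the time the new range starts. A small sketch of that policy, with FreeSlot as a hypothetical stand-in for the information kept in reusable_slots_:

#include <deque>

// Sketch only. owner_end is the end position of the range that last used the
// slot; slot_index is the stack slot it refers to.
struct FreeSlot { int owner_end; int slot_index; };

// Returns a reusable slot index, or -1 if the front slot is still "live".
int TryReuseSlotSketch(std::deque<FreeSlot>* slots, int new_range_start) {
  if (slots->empty()) return -1;
  if (slots->front().owner_end > new_range_start) return -1;
  int index = slots->front().slot_index;
  slots->pop_front();
  return index;
}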
1757 
1758 
1759 void RegisterAllocator::ActiveToHandled(LiveRange* range) {
1760  DCHECK(active_live_ranges_.Contains(range));
1761  active_live_ranges_.RemoveElement(range);
1762  TraceAlloc("Moving live range %d from active to handled\n", range->id());
1763  FreeSpillSlot(range);
1764 }
1765 
1766 
1767 void RegisterAllocator::ActiveToInactive(LiveRange* range) {
1768  DCHECK(active_live_ranges_.Contains(range));
1769  active_live_ranges_.RemoveElement(range);
1770  inactive_live_ranges_.Add(range, zone());
1771  TraceAlloc("Moving live range %d from active to inactive\n", range->id());
1772 }
1773 
1774 
1775 void RegisterAllocator::InactiveToHandled(LiveRange* range) {
1776  DCHECK(inactive_live_ranges_.Contains(range));
1777  inactive_live_ranges_.RemoveElement(range);
1778  TraceAlloc("Moving live range %d from inactive to handled\n", range->id());
1779  FreeSpillSlot(range);
1780 }
1781 
1782 
1783 void RegisterAllocator::InactiveToActive(LiveRange* range) {
1784  DCHECK(inactive_live_ranges_.Contains(range));
1785  inactive_live_ranges_.RemoveElement(range);
1786  active_live_ranges_.Add(range, zone());
1787  TraceAlloc("Moving live range %d from inactive to active\n", range->id());
1788 }
1789 
1790 
1791 // TryAllocateFreeReg and AllocateBlockedReg assume this
1792 // when allocating local arrays.
1793 STATIC_ASSERT(DoubleRegister::kMaxNumAllocatableRegisters >=
1794  Register::kMaxNumAllocatableRegisters);
1795 
1796 
1797 bool RegisterAllocator::TryAllocateFreeReg(LiveRange* current) {
1798  LifetimePosition free_until_pos[DoubleRegister::kMaxNumAllocatableRegisters];
1799 
1800  for (int i = 0; i < num_registers_; i++) {
1801  free_until_pos[i] = LifetimePosition::MaxPosition();
1802  }
1803 
1804  for (int i = 0; i < active_live_ranges_.length(); ++i) {
1805  LiveRange* cur_active = active_live_ranges_.at(i);
1806  free_until_pos[cur_active->assigned_register()] =
1807  LifetimePosition::FromInstructionIndex(0);
1808  }
1809 
1810  for (int i = 0; i < inactive_live_ranges_.length(); ++i) {
1811  LiveRange* cur_inactive = inactive_live_ranges_.at(i);
1812  DCHECK(cur_inactive->End().Value() > current->Start().Value());
1813  LifetimePosition next_intersection =
1814  cur_inactive->FirstIntersection(current);
1815  if (!next_intersection.IsValid()) continue;
1816  int cur_reg = cur_inactive->assigned_register();
1817  free_until_pos[cur_reg] = Min(free_until_pos[cur_reg], next_intersection);
1818  }
1819 
1820  InstructionOperand* hint = current->FirstHint();
1821  if (hint != NULL && (hint->IsRegister() || hint->IsDoubleRegister())) {
1822  int register_index = hint->index();
1823  TraceAlloc(
1824  "Found reg hint %s (free until [%d) for live range %d (end %d[).\n",
1825  RegisterName(register_index), free_until_pos[register_index].Value(),
1826  current->id(), current->End().Value());
1827 
1828  // The desired register is free until the end of the current live range.
1829  if (free_until_pos[register_index].Value() >= current->End().Value()) {
1830  TraceAlloc("Assigning preferred reg %s to live range %d\n",
1831  RegisterName(register_index), current->id());
1832  SetLiveRangeAssignedRegister(current, register_index);
1833  return true;
1834  }
1835  }
1836 
1837  // Find the register which stays free for the longest time.
1838  int reg = 0;
1839  for (int i = 1; i < RegisterCount(); ++i) {
1840  if (free_until_pos[i].Value() > free_until_pos[reg].Value()) {
1841  reg = i;
1842  }
1843  }
1844 
1845  LifetimePosition pos = free_until_pos[reg];
1846 
1847  if (pos.Value() <= current->Start().Value()) {
1848  // All registers are blocked.
1849  return false;
1850  }
1851 
1852  if (pos.Value() < current->End().Value()) {
1853  // Register reg is available at the range start but becomes blocked before
1854  // the range end. Split current at the position where it becomes blocked.
1855  LiveRange* tail = SplitRangeAt(current, pos);
1856  if (!AllocationOk()) return false;
1857  AddToUnhandledSorted(tail);
1858  }
1859 
1860 
1861  // Register reg is available at the range start and is free until
1862  // the range end.
1863  DCHECK(pos.Value() >= current->End().Value());
1864  TraceAlloc("Assigning free reg %s to live range %d\n", RegisterName(reg),
1865  current->id());
1866  SetLiveRangeAssignedRegister(current, reg);
1867 
1868  return true;
1869 }
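
Stripped of the bookkeeping, TryAllocateFreeReg reduces to a three-way decision on the best free-until position. A standalone sketch of that decision, with plain ints standing in for LifetimePosition values:

enum class FreeRegDecision { kAllBlocked, kAssignAndSplit, kAssignWholeRange };

// free_until is the position up to which the best register stays free.
FreeRegDecision DecideFreeReg(int free_until, int range_start, int range_end) {
  if (free_until <= range_start) {
    return FreeRegDecision::kAllBlocked;       // fall back to AllocateBlockedReg
  }
  if (free_until < range_end) {
    return FreeRegDecision::kAssignAndSplit;   // use it, split where it gets blocked
  }
  return FreeRegDecision::kAssignWholeRange;   // free for the whole range
}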
1870 
1871 
1872 void RegisterAllocator::AllocateBlockedReg(LiveRange* current) {
1873  UsePosition* register_use = current->NextRegisterPosition(current->Start());
1874  if (register_use == NULL) {
1875  // There is no use in the current live range that requires a register.
1876  // We can just spill it.
1877  Spill(current);
1878  return;
1879  }
1880 
1881 
1882  LifetimePosition use_pos[DoubleRegister::kMaxNumAllocatableRegisters];
1883  LifetimePosition block_pos[DoubleRegister::kMaxNumAllocatableRegisters];
1884 
1885  for (int i = 0; i < num_registers_; i++) {
1886  use_pos[i] = block_pos[i] = LifetimePosition::MaxPosition();
1887  }
1888 
1889  for (int i = 0; i < active_live_ranges_.length(); ++i) {
1890  LiveRange* range = active_live_ranges_[i];
1891  int cur_reg = range->assigned_register();
1892  if (range->IsFixed() || !range->CanBeSpilled(current->Start())) {
1893  block_pos[cur_reg] = use_pos[cur_reg] =
1894  LifetimePosition::FromInstructionIndex(0);
1895  } else {
1896  UsePosition* next_use =
1897  range->NextUsePositionRegisterIsBeneficial(current->Start());
1898  if (next_use == NULL) {
1899  use_pos[cur_reg] = range->End();
1900  } else {
1901  use_pos[cur_reg] = next_use->pos();
1902  }
1903  }
1904  }
1905 
1906  for (int i = 0; i < inactive_live_ranges_.length(); ++i) {
1907  LiveRange* range = inactive_live_ranges_.at(i);
1908  DCHECK(range->End().Value() > current->Start().Value());
1909  LifetimePosition next_intersection = range->FirstIntersection(current);
1910  if (!next_intersection.IsValid()) continue;
1911  int cur_reg = range->assigned_register();
1912  if (range->IsFixed()) {
1913  block_pos[cur_reg] = Min(block_pos[cur_reg], next_intersection);
1914  use_pos[cur_reg] = Min(block_pos[cur_reg], use_pos[cur_reg]);
1915  } else {
1916  use_pos[cur_reg] = Min(use_pos[cur_reg], next_intersection);
1917  }
1918  }
1919 
1920  int reg = 0;
1921  for (int i = 1; i < RegisterCount(); ++i) {
1922  if (use_pos[i].Value() > use_pos[reg].Value()) {
1923  reg = i;
1924  }
1925  }
1926 
1927  LifetimePosition pos = use_pos[reg];
1928 
1929  if (pos.Value() < register_use->pos().Value()) {
1930  // All registers are blocked before the first use that requires a register.
1931  // Spill the starting part of the live range up to that use.
1932  SpillBetween(current, current->Start(), register_use->pos());
1933  return;
1934  }
1935 
1936  if (block_pos[reg].Value() < current->End().Value()) {
1937  // The register becomes blocked before the end of the current range. Split
1938  // before that position.
1939  LiveRange* tail = SplitBetween(current, current->Start(),
1940  block_pos[reg].InstructionStart());
1941  if (!AllocationOk()) return;
1942  AddToUnhandledSorted(tail);
1943  }
1944 
1945  // Register reg is not blocked for the whole range.
1946  DCHECK(block_pos[reg].Value() >= current->End().Value());
1947  TraceAlloc("Assigning blocked reg %s to live range %d\n", RegisterName(reg),
1948  current->id());
1949  SetLiveRangeAssignedRegister(current, reg);
1950 
1951  // This register was not free. Thus we need to find and spill
1952  // parts of active and inactive live ranges that use the same register
1953  // at the same lifetime positions as current.
1954  SplitAndSpillIntersecting(current);
1955 }
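
In rough terms, AllocateBlockedReg picks the register whose next use by other ranges is furthest away and then chooses between three outcomes. A standalone sketch of that final decision, with plain ints standing in for lifetime positions:

enum class BlockedRegDecision { kSpillUntilFirstUse, kAssignAndSplit, kAssign };

// best_use_pos / best_block_pos correspond to use_pos[reg] / block_pos[reg]
// for the chosen register; first_register_use is the first position in the
// current range that must be in a register.
BlockedRegDecision DecideBlockedReg(int best_use_pos, int best_block_pos,
                                    int first_register_use, int range_end) {
  if (best_use_pos < first_register_use) {
    // Every register is taken before the current range even needs one:
    // spill its head up to that first register use.
    return BlockedRegDecision::kSpillUntilFirstUse;
  }
  if (best_block_pos < range_end) {
    // The register is usable now but becomes blocked later: split the tail
    // off at that point and assign the register to the head.
    return BlockedRegDecision::kAssignAndSplit;
  }
  return BlockedRegDecision::kAssign;
}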
1956 
1957 
1958 LifetimePosition RegisterAllocator::FindOptimalSpillingPos(
1959  LiveRange* range, LifetimePosition pos) {
1960  BasicBlock* block = GetBlock(pos.InstructionStart());
1961  BasicBlock* loop_header =
1962  block->IsLoopHeader() ? block : code()->GetContainingLoop(block);
1963 
1964  if (loop_header == NULL) return pos;
1965 
1966  UsePosition* prev_use = range->PreviousUsePositionRegisterIsBeneficial(pos);
1967 
1968  while (loop_header != NULL) {
1969  // We are going to spill the live range inside the loop.
1970  // If possible, try to move the spilling position backwards to the loop
1971  // header; this reduces the number of memory moves on the back edge.
1972  LifetimePosition loop_start = LifetimePosition::FromInstructionIndex(
1973  loop_header->first_instruction_index());
1974 
1975  if (range->Covers(loop_start)) {
1976  if (prev_use == NULL || prev_use->pos().Value() < loop_start.Value()) {
1977  // No register-beneficial use inside the loop before pos.
1978  pos = loop_start;
1979  }
1980  }
1981 
1982  // Try hoisting out to an outer loop.
1983  loop_header = code()->GetContainingLoop(loop_header);
1984  }
1985 
1986  return pos;
1987 }
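
A hedged sketch of the hoisting idea: starting from the spill position's block, walk outwards through the enclosing loop headers and, as long as the range covers the header start and there is no register-beneficial use in between, move the spill up to that header. The containers and names below are illustrative, not the types used in this file:

#include <vector>

// Sketch only: loop_header_starts lists the start positions of the enclosing
// loop headers, innermost first. prev_beneficial_use is the last use before
// pos that benefits from a register (-1 if none).
int HoistSpillPos(int pos, const std::vector<int>& loop_header_starts,
                  int range_start, int prev_beneficial_use) {
  for (int header_start : loop_header_starts) {
    bool range_covers_header = header_start >= range_start;  // simplification of Covers()
    bool no_use_in_between = prev_beneficial_use < header_start;
    if (range_covers_header && no_use_in_between) {
      pos = header_start;  // spill before entering the loop instead of inside it
    }
  }
  return pos;
}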
1988 
1989 
1990 void RegisterAllocator::SplitAndSpillIntersecting(LiveRange* current) {
1991  DCHECK(current->HasRegisterAssigned());
1992  int reg = current->assigned_register();
1993  LifetimePosition split_pos = current->Start();
1994  for (int i = 0; i < active_live_ranges_.length(); ++i) {
1995  LiveRange* range = active_live_ranges_[i];
1996  if (range->assigned_register() == reg) {
1997  UsePosition* next_pos = range->NextRegisterPosition(current->Start());
1998  LifetimePosition spill_pos = FindOptimalSpillingPos(range, split_pos);
1999  if (next_pos == NULL) {
2000  SpillAfter(range, spill_pos);
2001  } else {
2002  // When spilling between spill_pos and next_pos, ensure that the range
2003  // remains spilled at least until the start of the current live range.
2004  // This guarantees that we will not introduce new unhandled ranges that
2005  // start before the current range, as that would violate the allocation
2006  // invariant and lead to an inconsistent state of the active and inactive
2007  // live ranges: ranges are allocated in order of their start positions,
2008  // and are retired from active/inactive when the start of the current
2009  // live range is larger than their end.
2010  SpillBetweenUntil(range, spill_pos, current->Start(), next_pos->pos());
2011  }
2012  if (!AllocationOk()) return;
2013  ActiveToHandled(range);
2014  --i;
2015  }
2016  }
2017 
2018  for (int i = 0; i < inactive_live_ranges_.length(); ++i) {
2019  LiveRange* range = inactive_live_ranges_[i];
2020  DCHECK(range->End().Value() > current->Start().Value());
2021  if (range->assigned_register() == reg && !range->IsFixed()) {
2022  LifetimePosition next_intersection = range->FirstIntersection(current);
2023  if (next_intersection.IsValid()) {
2024  UsePosition* next_pos = range->NextRegisterPosition(current->Start());
2025  if (next_pos == NULL) {
2026  SpillAfter(range, split_pos);
2027  } else {
2028  next_intersection = Min(next_intersection, next_pos->pos());
2029  SpillBetween(range, split_pos, next_intersection);
2030  }
2031  if (!AllocationOk()) return;
2032  InactiveToHandled(range);
2033  --i;
2034  }
2035  }
2036  }
2037 }
2038 
2039 
2040 bool RegisterAllocator::IsBlockBoundary(LifetimePosition pos) {
2041  return pos.IsInstructionStart() &&
2042  InstructionAt(pos.InstructionIndex())->IsBlockStart();
2043 }
2044 
2045 
2046 LiveRange* RegisterAllocator::SplitRangeAt(LiveRange* range,
2047  LifetimePosition pos) {
2048  DCHECK(!range->IsFixed());
2049  TraceAlloc("Splitting live range %d at %d\n", range->id(), pos.Value());
2050 
2051  if (pos.Value() <= range->Start().Value()) return range;
2052 
2053  // We can't properly connect live ranges if the split occurred at the end
2054  // of a control instruction.
2055  DCHECK(pos.IsInstructionStart() ||
2056  !InstructionAt(pos.InstructionIndex())->IsControl());
2057 
2058  int vreg = GetVirtualRegister();
2059  if (!AllocationOk()) return NULL;
2060  LiveRange* result = LiveRangeFor(vreg);
2061  range->SplitAt(pos, result, zone());
2062  return result;
2063 }
2064 
2065 
2066 LiveRange* RegisterAllocator::SplitBetween(LiveRange* range,
2067  LifetimePosition start,
2068  LifetimePosition end) {
2069  DCHECK(!range->IsFixed());
2070  TraceAlloc("Splitting live range %d in position between [%d, %d]\n",
2071  range->id(), start.Value(), end.Value());
2072 
2073  LifetimePosition split_pos = FindOptimalSplitPos(start, end);
2074  DCHECK(split_pos.Value() >= start.Value());
2075  return SplitRangeAt(range, split_pos);
2076 }
2077 
2078 
2079 LifetimePosition RegisterAllocator::FindOptimalSplitPos(LifetimePosition start,
2080  LifetimePosition end) {
2081  int start_instr = start.InstructionIndex();
2082  int end_instr = end.InstructionIndex();
2083  DCHECK(start_instr <= end_instr);
2084 
2085  // We have no choice
2086  if (start_instr == end_instr) return end;
2087 
2088  BasicBlock* start_block = GetBlock(start);
2089  BasicBlock* end_block = GetBlock(end);
2090 
2091  if (end_block == start_block) {
2092  // The interval is split in the same basic block. Split at the latest
2093  // possible position.
2094  return end;
2095  }
2096 
2097  BasicBlock* block = end_block;
2098  // Find header of outermost loop.
2099  // TODO(titzer): fix redundancy below.
2100  while (code()->GetContainingLoop(block) != NULL &&
2101  code()->GetContainingLoop(block)->rpo_number_ >
2102  start_block->rpo_number_) {
2103  block = code()->GetContainingLoop(block);
2104  }
2105 
2106  // We did not find any suitable outer loop. Split at the latest possible
2107  // position unless end_block is a loop header itself.
2108  if (block == end_block && !end_block->IsLoopHeader()) return end;
2109 
2109 
2110  return LifetimePosition::FromInstructionIndex(
2111  block->first_instruction_index());
2112 }
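
As a sketch of the intent: when the legal split window spans basic blocks, prefer the entry of the outermost loop that lies inside the window, so the part that is likely to be spilled does not live through the loop body. With positions as plain ints and a precomputed list of candidate loop headers (an assumption for illustration):

#include <vector>

// Sketch only: candidate_headers holds the first-instruction indices of loop
// headers between start and end, ordered from innermost to outermost.
int OptimalSplitSketch(int start, int end,
                       const std::vector<int>& candidate_headers) {
  if (start == end) return end;   // no choice
  int pos = end;                  // default: split as late as possible
  for (int header : candidate_headers) {
    if (header > start && header < end) pos = header;  // hoist the split out of the loop
  }
  return pos;
}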
2113 
2114 
2115 void RegisterAllocator::SpillAfter(LiveRange* range, LifetimePosition pos) {
2116  LiveRange* second_part = SplitRangeAt(range, pos);
2117  if (!AllocationOk()) return;
2118  Spill(second_part);
2119 }
2120 
2121 
2122 void RegisterAllocator::SpillBetween(LiveRange* range, LifetimePosition start,
2123  LifetimePosition end) {
2124  SpillBetweenUntil(range, start, start, end);
2125 }
2126 
2127 
2128 void RegisterAllocator::SpillBetweenUntil(LiveRange* range,
2129  LifetimePosition start,
2130  LifetimePosition until,
2131  LifetimePosition end) {
2132  CHECK(start.Value() < end.Value());
2133  LiveRange* second_part = SplitRangeAt(range, start);
2134  if (!AllocationOk()) return;
2135 
2136  if (second_part->Start().Value() < end.Value()) {
2137  // The split result intersects with [start, end[.
2138  // Split it at a position in ]start+1, end[, spill the middle part,
2139  // and put the rest into unhandled.
2140  LiveRange* third_part = SplitBetween(
2141  second_part, Max(second_part->Start().InstructionEnd(), until),
2142  end.PrevInstruction().InstructionEnd());
2143  if (!AllocationOk()) return;
2144 
2145  DCHECK(third_part != second_part);
2146 
2147  Spill(second_part);
2148  AddToUnhandledSorted(third_part);
2149  } else {
2150  // The split result does not intersect with [start, end[.
2151  // Nothing to spill. Just put it into unhandled as a whole.
2152  AddToUnhandledSorted(second_part);
2153  }
2154 }
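
In effect, SpillBetweenUntil cuts the range into at most three pieces: the part before start keeps its current assignment, a middle piece covering the blocked region is spilled, and the tail is requeued as unhandled. A schematic sketch with plain integer positions:

#include <cstdio>

// Sketch only: prints the pieces produced for a range [range_start, range_end)
// that must stay out of a register on [start, end).
void SpillBetweenSketch(int range_start, int range_end, int start, int end) {
  if (start > range_start) {
    std::printf("keep    [%d, %d)\n", range_start, start);    // head keeps its assignment
  }
  int spill_end = end < range_end ? end : range_end;
  if (start < spill_end) {
    std::printf("spill   [%d, %d)\n", start, spill_end);      // middle part is spilled
  }
  if (spill_end < range_end) {
    std::printf("requeue [%d, %d)\n", spill_end, range_end);  // tail becomes unhandled again
  }
}

The real code additionally picks the exact cut point inside ]start+1, end[ via FindOptimalSplitPos, which this sketch ignores.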
2155 
2156 
2157 void RegisterAllocator::Spill(LiveRange* range) {
2158  DCHECK(!range->IsSpilled());
2159  TraceAlloc("Spilling live range %d\n", range->id());
2160  LiveRange* first = range->TopLevel();
2161 
2162  if (!first->HasAllocatedSpillOperand()) {
2163  InstructionOperand* op = TryReuseSpillSlot(range);
2164  if (op == NULL) {
2165  // Allocate a new operand referring to the spill slot.
2166  RegisterKind kind = range->Kind();
2167  int index = code()->frame()->AllocateSpillSlot(kind == DOUBLE_REGISTERS);
2168  if (kind == DOUBLE_REGISTERS) {
2169  op = DoubleStackSlotOperand::Create(index, zone());
2170  } else {
2171  DCHECK(kind == GENERAL_REGISTERS);
2172  op = StackSlotOperand::Create(index, zone());
2173  }
2174  }
2175  first->SetSpillOperand(op);
2176  }
2177  range->MakeSpilled(code_zone());
2178 }
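
The spill path above first tries to recycle a slot and only then allocates a fresh one, choosing a double-width operand for DOUBLE_REGISTERS ranges. A hedged sketch of that slot-selection policy; SlotPool and its slot-sizing are hypothetical stand-ins for the combination of reusable_slots_ and the frame's spill-slot allocator:

// Sketch only, not the frame's real layout.
struct SlotPool {
  int next_slot = 0;
  // reusable_slot is -1 when TryReuseSpillSlot found nothing.
  int ReuseOrAllocate(int reusable_slot, bool is_double) {
    if (reusable_slot >= 0) return reusable_slot;  // recycle an old slot
    int slot = next_slot;
    next_slot += is_double ? 2 : 1;                // assume doubles need a wider slot
    return slot;
  }
};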
2179 
2180 
2181 int RegisterAllocator::RegisterCount() const { return num_registers_; }
2182 
2183 
2184 #ifdef DEBUG
2185 
2186 
2187 void RegisterAllocator::Verify() const {
2188  for (int i = 0; i < live_ranges()->length(); ++i) {
2189  LiveRange* current = live_ranges()->at(i);
2190  if (current != NULL) current->Verify();
2191  }
2192 }
2193 
2194 
2195 #endif
2196 
2197 
2198 void RegisterAllocator::SetLiveRangeAssignedRegister(LiveRange* range,
2199  int reg) {
2200  if (range->Kind() == DOUBLE_REGISTERS) {
2201  assigned_double_registers_->Add(reg);
2202  } else {
2203  DCHECK(range->Kind() == GENERAL_REGISTERS);
2204  assigned_registers_->Add(reg);
2205  }
2206  range->set_assigned_register(reg, code_zone());
2207 }
2208 
2209 
2210 RegisterAllocatorPhase::RegisterAllocatorPhase(const char* name,
2211  RegisterAllocator* allocator)
2212  : CompilationPhase(name, allocator->code()->linkage()->info()),
2213  allocator_(allocator) {
2214  if (FLAG_turbo_stats) {
2215  allocator_zone_start_allocation_size_ =
2216  allocator->zone()->allocation_size();
2217  }
2218 }
2219 
2220 
2221 RegisterAllocatorPhase::~RegisterAllocatorPhase() {
2222  if (FLAG_turbo_stats) {
2223  unsigned size = allocator_->zone()->allocation_size() -
2224  allocator_zone_start_allocation_size_;
2225  isolate()->GetTStatistics()->SaveTiming(name(), base::TimeDelta(), size);
2226  }
2227 #ifdef DEBUG
2228  if (allocator_ != NULL) allocator_->Verify();
2229 #endif
2230 }
2231 }
2232 }
2233 } // namespace v8::internal::compiler