V8 Project
lithium-gap-resolver-ia32.cc
// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/v8.h"

#if V8_TARGET_ARCH_IA32

#include "src/ia32/lithium-codegen-ia32.h"
#include "src/ia32/lithium-gap-resolver-ia32.h"

namespace v8 {
namespace internal {

LGapResolver::LGapResolver(LCodeGen* owner)
    : cgen_(owner),
      moves_(32, owner->zone()),
      source_uses_(),
      destination_uses_(),
      spilled_register_(-1) {}


void LGapResolver::Resolve(LParallelMove* parallel_move) {
  DCHECK(HasBeenReset());
  // Build up a worklist of moves.
  BuildInitialMoveList(parallel_move);

  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands move = moves_[i];
    // Skip constants to perform them last. They don't block other moves
    // and skipping such moves with register destinations keeps those
    // registers free for the whole algorithm.
    if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
      PerformMove(i);
    }
  }

  // Perform the moves with constant sources.
  for (int i = 0; i < moves_.length(); ++i) {
    if (!moves_[i].IsEliminated()) {
      DCHECK(moves_[i].source()->IsConstantOperand());
      EmitMove(i);
    }
  }

  Finish();
  DCHECK(HasBeenReset());
}
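
// Illustrative note (not part of the original source): deferring
// constant-source moves keeps their destination registers free for use as
// temporaries. For example, in the parallel move
//   { [esp+8] -> [esp+4], 0x2A -> ebx },
// the memory-to-memory move runs first and may use ebx as its temporary
// (ebx is a destination only, so its current value is dead), and the
// constant load into ebx is emitted afterwards, overwriting the scratch
// value.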


void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
  // Perform a linear sweep of the moves to add them to the initial list of
  // moves to perform, ignoring any move that is redundant (the source is
  // the same as the destination, the destination is ignored and
  // unallocated, or the move was already eliminated).
  const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
  for (int i = 0; i < moves->length(); ++i) {
    LMoveOperands move = moves->at(i);
    if (!move.IsRedundant()) AddMove(move);
  }
  Verify();
}
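
// Illustrative example (not part of the original source): in the parallel
// move { eax -> eax, ebx -> ecx }, the first move is redundant (source and
// destination are the same operand) and is dropped here, so only
// ebx -> ecx enters the worklist.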


void LGapResolver::PerformMove(int index) {
  // Each call to this function performs a move and deletes it from the move
  // graph. We first recursively perform any move blocking this one. We
  // mark a move as "pending" on entry to PerformMove in order to detect
  // cycles in the move graph. We use operand swaps to resolve cycles,
  // which means that a call to PerformMove could change any source operand
  // in the move graph.

  DCHECK(!moves_[index].IsPending());
  DCHECK(!moves_[index].IsRedundant());

  // Clear this move's destination to indicate a pending move. The actual
  // destination is saved on the side.
  DCHECK(moves_[index].source() != NULL);  // Or else it will look eliminated.
  LOperand* destination = moves_[index].destination();
  moves_[index].set_destination(NULL);

  // Perform a depth-first traversal of the move graph to resolve
  // dependencies. Any unperformed, unpending move with a source the same
  // as this one's destination blocks this one so recursively perform all
  // such moves.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(destination) && !other_move.IsPending()) {
      // Though PerformMove can change any source operand in the move graph,
      // this call cannot create a blocking move via a swap (this loop does
      // not miss any). Assume there is a non-blocking move with source A
      // and this move is blocked on source B and there is a swap of A and
      // B. Then A and B must be involved in the same cycle (or they would
      // not be swapped). Since this move's destination is B and there is
      // only a single incoming edge to an operand, this move must also be
      // involved in the same cycle. In that case, the blocking move will
      // be created but will be "pending" when we return from PerformMove.
      PerformMove(i);
    }
  }

  // We are about to resolve this move and don't need it marked as
  // pending, so restore its destination.
  moves_[index].set_destination(destination);

  // This move's source may have changed due to swaps to resolve cycles and
  // so it may now be the last move in the cycle. If so remove it.
  if (moves_[index].source()->Equals(destination)) {
    RemoveMove(index);
    return;
  }

  // The move may be blocked on (at most one) pending move, in which case
  // we have a cycle. Search for such a blocking move and perform a swap to
  // resolve it.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(destination)) {
      DCHECK(other_move.IsPending());
      EmitSwap(index);
      return;
    }
  }

  // This move is not blocked.
  EmitMove(index);
}
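
// Illustrative trace (not part of the original source): for the cycle
// { eax -> ebx, ebx -> eax }, PerformMove(0) marks the first move pending
// and recurses into PerformMove(1). The second move finds that it is
// blocked only by the pending first move, so it resolves the cycle with
// EmitSwap(1) (an xchg of eax and ebx) and rewrites the first move's
// source to ebx. Back in PerformMove(0) the move is now ebx -> ebx, the
// last move of the cycle, and is simply removed.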


void LGapResolver::AddMove(LMoveOperands move) {
  LOperand* source = move.source();
  if (source->IsRegister()) ++source_uses_[source->index()];

  LOperand* destination = move.destination();
  if (destination->IsRegister()) ++destination_uses_[destination->index()];

  moves_.Add(move, cgen_->zone());
}


void LGapResolver::RemoveMove(int index) {
  LOperand* source = moves_[index].source();
  if (source->IsRegister()) {
    --source_uses_[source->index()];
    DCHECK(source_uses_[source->index()] >= 0);
  }

  LOperand* destination = moves_[index].destination();
  if (destination->IsRegister()) {
    --destination_uses_[destination->index()];
    DCHECK(destination_uses_[destination->index()] >= 0);
  }

  moves_[index].Eliminate();
}


int LGapResolver::CountSourceUses(LOperand* operand) {
  int count = 0;
  for (int i = 0; i < moves_.length(); ++i) {
    if (!moves_[i].IsEliminated() && moves_[i].source()->Equals(operand)) {
      ++count;
    }
  }
  return count;
}


Register LGapResolver::GetFreeRegisterNot(Register reg) {
  int skip_index = reg.is(no_reg) ? -1 : Register::ToAllocationIndex(reg);
  for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
    if (source_uses_[i] == 0 && destination_uses_[i] > 0 && i != skip_index) {
      return Register::FromAllocationIndex(i);
    }
  }
  return no_reg;
}
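
// Reading of the condition above (not part of the original source): a
// register is "free" when no remaining move still reads it
// (source_uses_ == 0) but some remaining move will write it
// (destination_uses_ > 0), so its current value is not needed by any
// remaining move and it will be overwritten anyway; it can therefore be
// clobbered as a temporary before its own move is performed.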


bool LGapResolver::HasBeenReset() {
  if (!moves_.is_empty()) return false;
  if (spilled_register_ >= 0) return false;

  for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
    if (source_uses_[i] != 0) return false;
    if (destination_uses_[i] != 0) return false;
  }
  return true;
}


void LGapResolver::Verify() {
#ifdef ENABLE_SLOW_DCHECKS
  // No operand should be the destination for more than one move.
  for (int i = 0; i < moves_.length(); ++i) {
    LOperand* destination = moves_[i].destination();
    for (int j = i + 1; j < moves_.length(); ++j) {
      SLOW_DCHECK(!destination->Equals(moves_[j].destination()));
    }
  }
#endif
}


#define __ ACCESS_MASM(cgen_->masm())

void LGapResolver::Finish() {
  if (spilled_register_ >= 0) {
    __ pop(Register::FromAllocationIndex(spilled_register_));
    spilled_register_ = -1;
  }
  moves_.Rewind(0);
}


void LGapResolver::EnsureRestored(LOperand* operand) {
  if (operand->IsRegister() && operand->index() == spilled_register_) {
    __ pop(Register::FromAllocationIndex(spilled_register_));
    spilled_register_ = -1;
  }
}


Register LGapResolver::EnsureTempRegister() {
  // 1. We may have already spilled to create a temp register.
  if (spilled_register_ >= 0) {
    return Register::FromAllocationIndex(spilled_register_);
  }

  // 2. We may have a free register that we can use without spilling.
  Register free = GetFreeRegisterNot(no_reg);
  if (!free.is(no_reg)) return free;

  // 3. Prefer to spill a register that is not used in any remaining move
  // because it will not need to be restored until the end.
  for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
    if (source_uses_[i] == 0 && destination_uses_[i] == 0) {
      Register scratch = Register::FromAllocationIndex(i);
      __ push(scratch);
      spilled_register_ = i;
      return scratch;
    }
  }

  // 4. Use an arbitrary register. Register 0 is as arbitrary as any other.
  Register scratch = Register::FromAllocationIndex(0);
  __ push(scratch);
  spilled_register_ = 0;
  return scratch;
}
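
// Illustrative sequence (not part of the original source): if case 4 is
// reached, the register at allocation index 0 is pushed and handed out as
// the temporary. The matching pop is emitted lazily: either by
// EnsureRestored() when a later move in this gap reads or writes that
// register, or by Finish() at the end of the parallel move.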


void LGapResolver::EmitMove(int index) {
  LOperand* source = moves_[index].source();
  LOperand* destination = moves_[index].destination();
  EnsureRestored(source);
  EnsureRestored(destination);

  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Register src = cgen_->ToRegister(source);
    Operand dst = cgen_->ToOperand(destination);
    __ mov(dst, src);

  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Operand src = cgen_->ToOperand(source);
    if (destination->IsRegister()) {
      Register dst = cgen_->ToRegister(destination);
      __ mov(dst, src);
    } else {
      // Spill on demand to use a temporary register for memory-to-memory
      // moves.
      Register tmp = EnsureTempRegister();
      Operand dst = cgen_->ToOperand(destination);
      __ mov(tmp, src);
      __ mov(dst, tmp);
    }

  } else if (source->IsConstantOperand()) {
    LConstantOperand* constant_source = LConstantOperand::cast(source);
    if (destination->IsRegister()) {
      Register dst = cgen_->ToRegister(destination);
      Representation r = cgen_->IsSmi(constant_source)
          ? Representation::Smi() : Representation::Integer32();
      if (cgen_->IsInteger32(constant_source)) {
        __ Move(dst, cgen_->ToImmediate(constant_source, r));
      } else {
        __ LoadObject(dst, cgen_->ToHandle(constant_source));
      }
    } else if (destination->IsDoubleRegister()) {
      double v = cgen_->ToDouble(constant_source);
      uint64_t int_val = bit_cast<uint64_t, double>(v);
      int32_t lower = static_cast<int32_t>(int_val);
      int32_t upper = static_cast<int32_t>(int_val >> kBitsPerInt);
      XMMRegister dst = cgen_->ToDoubleRegister(destination);
      if (int_val == 0) {
        __ xorps(dst, dst);
      } else {
        __ push(Immediate(upper));
        __ push(Immediate(lower));
        __ movsd(dst, Operand(esp, 0));
        __ add(esp, Immediate(kDoubleSize));
      }
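      // Worked example (not part of the original source): for v == 1.0 the
      // bit pattern is 0x3FF0000000000000, so upper == 0x3FF00000 and
      // lower == 0. Pushing upper and then lower leaves lower at [esp] and
      // upper at [esp+4], which is exactly the little-endian layout movsd
      // expects for the 64-bit value.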
    } else {
      DCHECK(destination->IsStackSlot());
      Operand dst = cgen_->ToOperand(destination);
      Representation r = cgen_->IsSmi(constant_source)
          ? Representation::Smi() : Representation::Integer32();
      if (cgen_->IsInteger32(constant_source)) {
        __ Move(dst, cgen_->ToImmediate(constant_source, r));
      } else {
        Register tmp = EnsureTempRegister();
        __ LoadObject(tmp, cgen_->ToHandle(constant_source));
        __ mov(dst, tmp);
      }
    }

  } else if (source->IsDoubleRegister()) {
    XMMRegister src = cgen_->ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      XMMRegister dst = cgen_->ToDoubleRegister(destination);
      __ movaps(dst, src);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      Operand dst = cgen_->ToOperand(destination);
      __ movsd(dst, src);
    }
  } else if (source->IsDoubleStackSlot()) {
    DCHECK(destination->IsDoubleRegister() ||
           destination->IsDoubleStackSlot());
    Operand src = cgen_->ToOperand(source);
    if (destination->IsDoubleRegister()) {
      XMMRegister dst = cgen_->ToDoubleRegister(destination);
      __ movsd(dst, src);
    } else {
      // We rely on having xmm0 available as a fixed scratch register.
      Operand dst = cgen_->ToOperand(destination);
      __ movsd(xmm0, src);
      __ movsd(dst, xmm0);
    }
  } else {
    UNREACHABLE();
  }

  RemoveMove(index);
}


void LGapResolver::EmitSwap(int index) {
  LOperand* source = moves_[index].source();
  LOperand* destination = moves_[index].destination();
  EnsureRestored(source);
  EnsureRestored(destination);

  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister() && destination->IsRegister()) {
    // Register-register.
    Register src = cgen_->ToRegister(source);
    Register dst = cgen_->ToRegister(destination);
    __ xchg(dst, src);

  } else if ((source->IsRegister() && destination->IsStackSlot()) ||
             (source->IsStackSlot() && destination->IsRegister())) {
    // Register-memory. Use a free register as a temp if possible. Do not
    // spill on demand because the simple spill implementation cannot avoid
    // spilling src at this point.
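    // Note (not part of the original source): when no temp register is
    // free, the fallback below is the classic three-XOR swap. For any two
    // words a and b:
    //   a ^= b;   // a == a0 ^ b0
    //   b ^= a;   // b == b0 ^ a0 ^ b0 == a0
    //   a ^= b;   // a == a0 ^ b0 ^ a0 == b0
    // so reg and mem are exchanged without touching any other register, at
    // the cost of extra memory traffic.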
    Register tmp = GetFreeRegisterNot(no_reg);
    Register reg =
        cgen_->ToRegister(source->IsRegister() ? source : destination);
    Operand mem =
        cgen_->ToOperand(source->IsRegister() ? destination : source);
    if (tmp.is(no_reg)) {
      __ xor_(reg, mem);
      __ xor_(mem, reg);
      __ xor_(reg, mem);
    } else {
      __ mov(tmp, mem);
      __ mov(mem, reg);
      __ mov(reg, tmp);
    }

  } else if (source->IsStackSlot() && destination->IsStackSlot()) {
    // Memory-memory. Spill on demand to use a temporary. If there is a
    // free register after that, use it as a second temporary.
    Register tmp0 = EnsureTempRegister();
    Register tmp1 = GetFreeRegisterNot(tmp0);
    Operand src = cgen_->ToOperand(source);
    Operand dst = cgen_->ToOperand(destination);
    if (tmp1.is(no_reg)) {
      // Only one temp register available to us.
      __ mov(tmp0, dst);
      __ xor_(tmp0, src);
      __ xor_(src, tmp0);
      __ xor_(tmp0, src);
      __ mov(dst, tmp0);
    } else {
      __ mov(tmp0, dst);
      __ mov(tmp1, src);
      __ mov(dst, tmp1);
      __ mov(src, tmp0);
    }
  } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
    // XMM register-register swap. We rely on having xmm0
    // available as a fixed scratch register.
    XMMRegister src = cgen_->ToDoubleRegister(source);
    XMMRegister dst = cgen_->ToDoubleRegister(destination);
    __ movaps(xmm0, src);
    __ movaps(src, dst);
    __ movaps(dst, xmm0);
  } else if (source->IsDoubleRegister() || destination->IsDoubleRegister()) {
    // XMM register-memory swap. We rely on having xmm0
    // available as a fixed scratch register.
    DCHECK(source->IsDoubleStackSlot() || destination->IsDoubleStackSlot());
    XMMRegister reg = cgen_->ToDoubleRegister(source->IsDoubleRegister()
                                                  ? source
                                                  : destination);
    Operand other =
        cgen_->ToOperand(source->IsDoubleRegister() ? destination : source);
    __ movsd(xmm0, other);
    __ movsd(other, reg);
    __ movaps(reg, xmm0);
  } else if (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot()) {
    // Double-width memory-to-memory. Spill on demand to use a general
    // purpose temporary register and also rely on having xmm0 available as
    // a fixed scratch register.
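    // Note (not part of the original source): a double stack slot occupies
    // two 32-bit words (ToOperand addresses the low word, HighOperand the
    // high word), so the copy below moves the source one word at a time
    // through the general purpose temp. Only the destination needs to be
    // saved (in xmm0), because its words are overwritten before the old
    // destination value is finally written back into the source slot.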
    Register tmp = EnsureTempRegister();
    Operand src0 = cgen_->ToOperand(source);
    Operand src1 = cgen_->HighOperand(source);
    Operand dst0 = cgen_->ToOperand(destination);
    Operand dst1 = cgen_->HighOperand(destination);
    __ movsd(xmm0, dst0);  // Save destination in xmm0.
    __ mov(tmp, src0);  // Then use tmp to copy source to destination.
    __ mov(dst0, tmp);
    __ mov(tmp, src1);
    __ mov(dst1, tmp);
    __ movsd(src0, xmm0);

  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }

  // The swap of source and destination has executed a move from source to
  // destination.
  RemoveMove(index);

  // Any unperformed (including pending) move with a source of either
  // this move's source or destination needs to have its source
  // changed to reflect the state of affairs after the swap.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(source)) {
      moves_[i].set_source(destination);
    } else if (other_move.Blocks(destination)) {
      moves_[i].set_source(source);
    }
  }

  // In addition to swapping the actual uses as sources, we need to update
  // the use counts.
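  // Illustrative case (not part of the original source): after swapping
  // eax with [esp+8], every remaining move that used to read eax now reads
  // [esp+8] and vice versa, so eax's source-use count can change by an
  // arbitrary amount. Recounting with CountSourceUses is simpler than
  // adjusting it incrementally.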
  if (source->IsRegister() && destination->IsRegister()) {
    int temp = source_uses_[source->index()];
    source_uses_[source->index()] = source_uses_[destination->index()];
    source_uses_[destination->index()] = temp;
  } else if (source->IsRegister()) {
    // We don't have use counts for non-register operands like destination.
    // Compute those counts now.
    source_uses_[source->index()] = CountSourceUses(source);
  } else if (destination->IsRegister()) {
    source_uses_[destination->index()] = CountSourceUses(destination);
  }
}

#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_IA32