lithium-gap-resolver-x87.cc
// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/v8.h"

#if V8_TARGET_ARCH_X87

#include "src/x87/lithium-codegen-x87.h"
#include "src/x87/lithium-gap-resolver-x87.h"

namespace v8 {
namespace internal {

LGapResolver::LGapResolver(LCodeGen* owner)
    : cgen_(owner),
      moves_(32, owner->zone()),
      source_uses_(),
      destination_uses_(),
      spilled_register_(-1) {}


void LGapResolver::Resolve(LParallelMove* parallel_move) {
  DCHECK(HasBeenReset());
  // Build up a worklist of moves.
  BuildInitialMoveList(parallel_move);

  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands move = moves_[i];
    // Skip constants to perform them last. They don't block other moves
    // and skipping such moves with register destinations keeps those
    // registers free for the whole algorithm.
    if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
      PerformMove(i);
    }
  }

  // Perform the moves with constant sources.
  for (int i = 0; i < moves_.length(); ++i) {
    if (!moves_[i].IsEliminated()) {
      DCHECK(moves_[i].source()->IsConstantOperand());
      EmitMove(i);
    }
  }

  Finish();
  DCHECK(HasBeenReset());
}
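// Example (operands are illustrative): for the parallel move
// { eax -> a stack slot S, constant 1 -> ebx }, the first pass above performs
// only eax -> S; the constant load into ebx is deferred to the second pass,
// so ebx still has zero source uses and can serve as a free register while
// the non-constant moves are being resolved.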


void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
  // Perform a linear sweep of the moves to add them to the initial list of
  // moves to perform, ignoring any move that is redundant (the source is
  // the same as the destination, the destination is ignored and
  // unallocated, or the move was already eliminated).
  const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
  for (int i = 0; i < moves->length(); ++i) {
    LMoveOperands move = moves->at(i);
    if (!move.IsRedundant()) AddMove(move);
  }
  Verify();
}


void LGapResolver::PerformMove(int index) {
  // Each call to this function performs a move and deletes it from the move
  // graph. We first recursively perform any move blocking this one. We
  // mark a move as "pending" on entry to PerformMove in order to detect
  // cycles in the move graph. We use operand swaps to resolve cycles,
  // which means that a call to PerformMove could change any source operand
  // in the move graph.

  DCHECK(!moves_[index].IsPending());
  DCHECK(!moves_[index].IsRedundant());

  // Clear this move's destination to indicate a pending move. The actual
  // destination is saved on the side.
  DCHECK(moves_[index].source() != NULL);  // Or else it will look eliminated.
  LOperand* destination = moves_[index].destination();
  moves_[index].set_destination(NULL);

  // Perform a depth-first traversal of the move graph to resolve
  // dependencies. Any unperformed, unpending move with a source the same
  // as this one's destination blocks this one so recursively perform all
  // such moves.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(destination) && !other_move.IsPending()) {
      // Though PerformMove can change any source operand in the move graph,
      // this call cannot create a blocking move via a swap (this loop does
      // not miss any). Assume there is a non-blocking move with source A
      // and this move is blocked on source B and there is a swap of A and
      // B. Then A and B must be involved in the same cycle (or they would
      // not be swapped). Since this move's destination is B and there is
      // only a single incoming edge to an operand, this move must also be
      // involved in the same cycle. In that case, the blocking move will
      // be created but will be "pending" when we return from PerformMove.
      PerformMove(i);
    }
  }

  // We are about to resolve this move and don't need it marked as
  // pending, so restore its destination.
  moves_[index].set_destination(destination);

  // This move's source may have changed due to swaps to resolve cycles and
  // so it may now be the last move in the cycle. If so remove it.
  if (moves_[index].source()->Equals(destination)) {
    RemoveMove(index);
    return;
  }

  // The move may be blocked on a (at most one) pending move, in which case
  // we have a cycle. Search for such a blocking move and perform a swap to
  // resolve it.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(destination)) {
      DCHECK(other_move.IsPending());
      EmitSwap(index);
      return;
    }
  }

  // This move is not blocked.
  EmitMove(index);
}
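// Worked example (register names are illustrative): for the cycle
// { eax -> ebx, ebx -> eax }, PerformMove(0) marks move 0 as pending and
// recurses into move 1, whose source is move 0's destination. Move 1 is
// blocked only by the pending move 0, so it is resolved with EmitSwap(1).
// The swap rewrites move 0's source to ebx; back in PerformMove(0), the
// source now equals the destination and the move is simply removed.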


void LGapResolver::AddMove(LMoveOperands move) {
  LOperand* source = move.source();
  if (source->IsRegister()) ++source_uses_[source->index()];

  LOperand* destination = move.destination();
  if (destination->IsRegister()) ++destination_uses_[destination->index()];

  moves_.Add(move, cgen_->zone());
}


void LGapResolver::RemoveMove(int index) {
  LOperand* source = moves_[index].source();
  if (source->IsRegister()) {
    --source_uses_[source->index()];
    DCHECK(source_uses_[source->index()] >= 0);
  }

  LOperand* destination = moves_[index].destination();
  if (destination->IsRegister()) {
    --destination_uses_[destination->index()];
    DCHECK(destination_uses_[destination->index()] >= 0);
  }

  moves_[index].Eliminate();
}


int LGapResolver::CountSourceUses(LOperand* operand) {
  int count = 0;
  for (int i = 0; i < moves_.length(); ++i) {
    if (!moves_[i].IsEliminated() && moves_[i].source()->Equals(operand)) {
      ++count;
    }
  }
  return count;
}


Register LGapResolver::GetFreeRegisterNot(Register reg) {
  int skip_index = reg.is(no_reg) ? -1 : Register::ToAllocationIndex(reg);
  for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
    if (source_uses_[i] == 0 && destination_uses_[i] > 0 && i != skip_index) {
      return Register::FromAllocationIndex(i);
    }
  }
  return no_reg;
}
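// Note on the criterion above: a register counts as "free" when it is not
// read by any remaining move (source_uses_ == 0) but is written by at least
// one (destination_uses_ > 0). Its current value is therefore dead, so it can
// safely be clobbered as a temporary before its final write happens.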


bool LGapResolver::HasBeenReset() {
  if (!moves_.is_empty()) return false;
  if (spilled_register_ >= 0) return false;

  for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
    if (source_uses_[i] != 0) return false;
    if (destination_uses_[i] != 0) return false;
  }
  return true;
}


void LGapResolver::Verify() {
#ifdef ENABLE_SLOW_DCHECKS
  // No operand should be the destination for more than one move.
  for (int i = 0; i < moves_.length(); ++i) {
    LOperand* destination = moves_[i].destination();
    for (int j = i + 1; j < moves_.length(); ++j) {
      SLOW_DCHECK(!destination->Equals(moves_[j].destination()));
    }
  }
#endif
}
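// The check in Verify() encodes the invariant that every operand has at most
// one incoming move; PerformMove relies on this single-incoming-edge property
// when it argues that a single swap suffices to break a cycle.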


#define __ ACCESS_MASM(cgen_->masm())

void LGapResolver::Finish() {
  if (spilled_register_ >= 0) {
    __ pop(Register::FromAllocationIndex(spilled_register_));
    spilled_register_ = -1;
  }
  moves_.Rewind(0);
}


void LGapResolver::EnsureRestored(LOperand* operand) {
  if (operand->IsRegister() && operand->index() == spilled_register_) {
    __ pop(Register::FromAllocationIndex(spilled_register_));
    spilled_register_ = -1;
  }
}


Register LGapResolver::EnsureTempRegister() {
  // 1. We may have already spilled to create a temp register.
  if (spilled_register_ >= 0) {
    return Register::FromAllocationIndex(spilled_register_);
  }

  // 2. We may have a free register that we can use without spilling.
  Register free = GetFreeRegisterNot(no_reg);
  if (!free.is(no_reg)) return free;

  // 3. Prefer to spill a register that is not used in any remaining move
  // because it will not need to be restored until the end.
  for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
    if (source_uses_[i] == 0 && destination_uses_[i] == 0) {
      Register scratch = Register::FromAllocationIndex(i);
      __ push(scratch);
      spilled_register_ = i;
      return scratch;
    }
  }

  // 4. Use an arbitrary register. Register 0 is as arbitrary as any other.
  Register scratch = Register::FromAllocationIndex(0);
  __ push(scratch);
  spilled_register_ = 0;
  return scratch;
}
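// The push in cases 3 and 4 above is balanced by exactly one pop: either in
// EnsureRestored(), the next time the spilled register shows up as a move
// operand, or in Finish() once the whole gap has been resolved.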


void LGapResolver::EmitMove(int index) {
  LOperand* source = moves_[index].source();
  LOperand* destination = moves_[index].destination();
  EnsureRestored(source);
  EnsureRestored(destination);

  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Register src = cgen_->ToRegister(source);
    Operand dst = cgen_->ToOperand(destination);
    __ mov(dst, src);

  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Operand src = cgen_->ToOperand(source);
    if (destination->IsRegister()) {
      Register dst = cgen_->ToRegister(destination);
      __ mov(dst, src);
    } else {
      // Spill on demand to use a temporary register for memory-to-memory
      // moves.
      Register tmp = EnsureTempRegister();
      Operand dst = cgen_->ToOperand(destination);
      __ mov(tmp, src);
      __ mov(dst, tmp);
    }

  } else if (source->IsConstantOperand()) {
    LConstantOperand* constant_source = LConstantOperand::cast(source);
    if (destination->IsRegister()) {
      Register dst = cgen_->ToRegister(destination);
      Representation r = cgen_->IsSmi(constant_source)
          ? Representation::Smi() : Representation::Integer32();
      if (cgen_->IsInteger32(constant_source)) {
        __ Move(dst, cgen_->ToImmediate(constant_source, r));
      } else {
        __ LoadObject(dst, cgen_->ToHandle(constant_source));
      }
    } else if (destination->IsDoubleRegister()) {
      double v = cgen_->ToDouble(constant_source);
      uint64_t int_val = bit_cast<uint64_t, double>(v);
      int32_t lower = static_cast<int32_t>(int_val);
      int32_t upper = static_cast<int32_t>(int_val >> kBitsPerInt);
      __ push(Immediate(upper));
      __ push(Immediate(lower));
      X87Register dst = cgen_->ToX87Register(destination);
      cgen_->X87Mov(dst, MemOperand(esp, 0));
      __ add(esp, Immediate(kDoubleSize));
    } else {
      DCHECK(destination->IsStackSlot());
      Operand dst = cgen_->ToOperand(destination);
      Representation r = cgen_->IsSmi(constant_source)
          ? Representation::Smi() : Representation::Integer32();
      if (cgen_->IsInteger32(constant_source)) {
        __ Move(dst, cgen_->ToImmediate(constant_source, r));
      } else {
        Register tmp = EnsureTempRegister();
        __ LoadObject(tmp, cgen_->ToHandle(constant_source));
        __ mov(dst, tmp);
      }
    }

  } else if (source->IsDoubleRegister()) {
    // Store the X87 source register into the destination, which is either a
    // double stack slot or another X87 register.
    if (destination->IsDoubleStackSlot()) {
      Operand dst = cgen_->ToOperand(destination);
      X87Register src = cgen_->ToX87Register(source);
      cgen_->X87Mov(dst, src);
    } else {
      X87Register dst = cgen_->ToX87Register(destination);
      X87Register src = cgen_->ToX87Register(source);
      cgen_->X87Mov(dst, src);
    }
  } else if (source->IsDoubleStackSlot()) {
    // Copy the double either to another double stack slot (word by word
    // through an integer temp register) or onto the floating point stack
    // via X87Mov when the destination is a double register.
    if (destination->IsDoubleStackSlot()) {
      Register tmp = EnsureTempRegister();
      Operand src0 = cgen_->ToOperand(source);
      Operand src1 = cgen_->HighOperand(source);
      Operand dst0 = cgen_->ToOperand(destination);
      Operand dst1 = cgen_->HighOperand(destination);
      __ mov(tmp, src0);  // Copy the low word through tmp ...
      __ mov(dst0, tmp);
      __ mov(tmp, src1);  // ... and then the high word.
      __ mov(dst1, tmp);
    } else {
      Operand src = cgen_->ToOperand(source);
      X87Register dst = cgen_->ToX87Register(destination);
      cgen_->X87Mov(dst, src);
    }
  } else {
    UNREACHABLE();
  }

  RemoveMove(index);
}
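// Note on the constant-to-double case above: the double is materialized by
// pushing its upper and then its lower 32-bit half, so the full little-endian
// value sits at MemOperand(esp, 0); it is loaded onto the x87 stack with
// X87Mov, and the two words are popped again by adding kDoubleSize to esp.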


void LGapResolver::EmitSwap(int index) {
  LOperand* source = moves_[index].source();
  LOperand* destination = moves_[index].destination();
  EnsureRestored(source);
  EnsureRestored(destination);

  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister() && destination->IsRegister()) {
    // Register-register.
    Register src = cgen_->ToRegister(source);
    Register dst = cgen_->ToRegister(destination);
    __ xchg(dst, src);

  } else if ((source->IsRegister() && destination->IsStackSlot()) ||
             (source->IsStackSlot() && destination->IsRegister())) {
    // Register-memory. Use a free register as a temp if possible. Do not
    // spill on demand because the simple spill implementation cannot avoid
    // spilling src at this point.
    Register tmp = GetFreeRegisterNot(no_reg);
    Register reg =
        cgen_->ToRegister(source->IsRegister() ? source : destination);
    Operand mem =
        cgen_->ToOperand(source->IsRegister() ? destination : source);
    if (tmp.is(no_reg)) {
      // No free register: swap the register and the memory operand in place
      // using the three-XOR trick.
      __ xor_(reg, mem);
      __ xor_(mem, reg);
      __ xor_(reg, mem);
    } else {
      __ mov(tmp, mem);
      __ mov(mem, reg);
      __ mov(reg, tmp);
    }

  } else if (source->IsStackSlot() && destination->IsStackSlot()) {
    // Memory-memory. Spill on demand to use a temporary. If there is a
    // free register after that, use it as a second temporary.
    Register tmp0 = EnsureTempRegister();
    Register tmp1 = GetFreeRegisterNot(tmp0);
    Operand src = cgen_->ToOperand(source);
    Operand dst = cgen_->ToOperand(destination);
    if (tmp1.is(no_reg)) {
      // Only one temp register available to us: swap through tmp0 using the
      // three-XOR trick so no second temporary is needed.
      __ mov(tmp0, dst);
      __ xor_(tmp0, src);
      __ xor_(src, tmp0);
      __ xor_(tmp0, src);
      __ mov(dst, tmp0);
    } else {
      __ mov(tmp0, dst);
      __ mov(tmp1, src);
      __ mov(dst, tmp1);
      __ mov(src, tmp0);
    }
  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }

  // The swap of source and destination has executed a move from source to
  // destination.
  RemoveMove(index);

  // Any unperformed (including pending) move with a source of either
  // this move's source or destination needs to have its source
  // changed to reflect the state of affairs after the swap.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(source)) {
      moves_[i].set_source(destination);
    } else if (other_move.Blocks(destination)) {
      moves_[i].set_source(source);
    }
  }

  // In addition to swapping the actual uses as sources, we need to update
  // the use counts.
  if (source->IsRegister() && destination->IsRegister()) {
    int temp = source_uses_[source->index()];
    source_uses_[source->index()] = source_uses_[destination->index()];
    source_uses_[destination->index()] = temp;
  } else if (source->IsRegister()) {
    // We don't have use counts for non-register operands like destination.
    // Compute those counts now.
    source_uses_[source->index()] = CountSourceUses(source);
  } else if (destination->IsRegister()) {
    source_uses_[destination->index()] = CountSourceUses(destination);
  }
}

#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_X87