#include "src/v8.h"

#if V8_TARGET_ARCH_IA32

#include "src/ia32/lithium-codegen-ia32.h"
#include "src/ia32/lithium-gap-resolver-ia32.h"

namespace v8 {
namespace internal {

LGapResolver::LGapResolver(LCodeGen* owner)
    : cgen_(owner),
      moves_(32, owner->zone()),
      source_uses_(),
      destination_uses_(),
      spilled_register_(-1) {}

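// How resolution works: a parallel move is a set of moves that notionally
// happen simultaneously.  Resolve() serializes them by treating each move
// as a node in a graph with an edge wherever one move's source is another
// move's destination.  PerformMove() walks this graph depth-first, emitting
// unblocked moves first and breaking cycles with swaps; moves from
// constants can never block anything and are emitted in a final pass.
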
void LGapResolver::Resolve(LParallelMove* parallel_move) {
  DCHECK(HasBeenReset());
  // Build up a worklist of moves.
  BuildInitialMoveList(parallel_move);

  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands move = moves_[i];
    // Skip constants to perform them last.  They don't block other moves
    // and skipping such moves with register destinations keeps those
    // registers free for the whole algorithm.
    if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
      PerformMove(i);
    }
  }

  // Perform the moves with constant sources.
  for (int i = 0; i < moves_.length(); ++i) {
    if (!moves_[i].IsEliminated()) {
      DCHECK(moves_[i].source()->IsConstantOperand());
      EmitMove(i);
    }
  }

  Finish();
  DCHECK(HasBeenReset());
}

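// Note on the two passes above: a constant is never a move destination, so
// a move whose source is a constant can never block another move.
// Deferring constant loads also keeps their destination registers free for
// use as temporaries during the first pass.
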
void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
  // Perform a linear sweep of the moves to add them to the initial list of
  // moves to perform, ignoring any move that is redundant (the source is
  // the same as the destination, the destination is ignored and
  // unallocated, or the move was already eliminated).
  const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
  for (int i = 0; i < moves->length(); ++i) {
    LMoveOperands move = moves->at(i);
    if (!move.IsRedundant()) AddMove(move);
  }
  Verify();
}

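// Example: a move such as {eax -> eax} is redundant and never enters the
// worklist; filtering it out here keeps the cycle handling below simple.
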
void LGapResolver::PerformMove(int index) {
  // Each call to this function performs a move and deletes it from the move
  // graph.  We first recursively perform any move blocking this one.  A
  // move is marked "pending" on entry (by clearing its destination) so that
  // cycles in the move graph can be detected; cycles are resolved with
  // operand swaps, which may change any source operand in the graph.
  DCHECK(!moves_[index].IsPending());
  DCHECK(!moves_[index].IsRedundant());

  // Clear this move's destination to indicate a pending move.  The actual
  // destination is saved on the side.
  DCHECK(moves_[index].source() != NULL);  // Or else it will look eliminated.
  LOperand* destination = moves_[index].destination();
  moves_[index].set_destination(NULL);

  // Any unperformed, unpending move with a source equal to this move's
  // destination blocks this one, so perform all such moves recursively.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(destination) && !other_move.IsPending()) {
      PerformMove(i);
    }
  }

  // We are about to resolve this move, so restore its destination.
  moves_[index].set_destination(destination);

  // This move's source may have changed due to swaps to resolve cycles; if
  // it is now the last move in a cycle, it is a no-op and can be removed.
  if (moves_[index].source()->Equals(destination)) {
    RemoveMove(index);
    return;
  }

  // The move may be blocked on at most one pending move, in which case we
  // have a cycle.  Resolve it with a swap.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(destination)) {
      DCHECK(other_move.IsPending());
      EmitSwap(index);
      return;
    }
  }

  // This move is not blocked.
  EmitMove(index);
}

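// Worked example of the cycle handling above, assuming moves_ holds
// {0: eax -> ebx, 1: ebx -> eax}:
//   PerformMove(0) marks move 0 pending and recurses into move 1, which
//   blocks it (move 1's source ebx is move 0's destination).
//   PerformMove(1) finds that its only blocker, move 0, is pending, so it
//   calls EmitSwap(1): an xchg exchanges eax and ebx, move 1 is removed,
//   and move 0's source is rewritten from eax to ebx.
//   When PerformMove(0) resumes, move 0 reads {ebx -> ebx} and is dropped
//   as the completed tail of the cycle.
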
void LGapResolver::AddMove(LMoveOperands move) {
  LOperand* source = move.source();
  if (source->IsRegister()) ++source_uses_[source->index()];

  LOperand* destination = move.destination();
  if (destination->IsRegister()) ++destination_uses_[destination->index()];

  moves_.Add(move, cgen_->zone());
}

void LGapResolver::RemoveMove(int index) {
  LOperand* source = moves_[index].source();
  if (source->IsRegister()) {
    --source_uses_[source->index()];
    DCHECK(source_uses_[source->index()] >= 0);
  }

  LOperand* destination = moves_[index].destination();
  if (destination->IsRegister()) {
    --destination_uses_[destination->index()];
    DCHECK(destination_uses_[destination->index()] >= 0);
  }

  moves_[index].Eliminate();
}

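// AddMove/RemoveMove keep the per-register use counts (source_uses_ and
// destination_uses_) in sync with the worklist, so the temp-register
// helpers below and the HasBeenReset() check can answer questions about
// register liveness without rescanning the move list.
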
int LGapResolver::CountSourceUses(LOperand* operand) {
  int count = 0;
  for (int i = 0; i < moves_.length(); ++i) {
    if (!moves_[i].IsEliminated() && moves_[i].source()->Equals(operand)) {
      ++count;
    }
  }
  return count;
}

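// CountSourceUses() recomputes a use count from scratch.  It is needed
// because EmitSwap() can rewrite the sources of arbitrary moves, which
// invalidates the incrementally maintained source_uses_ entries.
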
Register LGapResolver::GetFreeRegisterNot(Register reg) {
  int skip_index = reg.is(no_reg) ? -1 : Register::ToAllocationIndex(reg);
  for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
    if (source_uses_[i] == 0 && destination_uses_[i] > 0 && i != skip_index) {
      return Register::FromAllocationIndex(i);
    }
  }
  return no_reg;
}

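// A register is "free" here when no remaining move reads it
// (source_uses_[i] == 0) but some remaining move will overwrite it
// (destination_uses_[i] > 0): its current value is dead, so it can be
// clobbered as a temporary without being saved or restored.
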
bool LGapResolver::HasBeenReset() {
  if (!moves_.is_empty()) return false;
  if (spilled_register_ >= 0) return false;

  for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
    if (source_uses_[i] != 0) return false;
    if (destination_uses_[i] != 0) return false;
  }
  return true;
}

void LGapResolver::Verify() {
#ifdef ENABLE_SLOW_DCHECKS
  // No operand should be the destination for more than one move.
  for (int i = 0; i < moves_.length(); ++i) {
    LOperand* destination = moves_[i].destination();
    for (int j = i + 1; j < moves_.length(); ++j) {
      SLOW_DCHECK(!destination->Equals(moves_[j].destination()));
    }
  }
#endif
}

#define __ ACCESS_MASM(cgen_->masm())

void LGapResolver::Finish() {
  if (spilled_register_ >= 0) {
    __ pop(Register::FromAllocationIndex(spilled_register_));
    spilled_register_ = -1;
  }
  moves_.Rewind(0);
}

void LGapResolver::EnsureRestored(LOperand* operand) {
  if (operand->IsRegister() && operand->index() == spilled_register_) {
    __ pop(Register::FromAllocationIndex(spilled_register_));
    spilled_register_ = -1;
  }
}

Register LGapResolver::EnsureTempRegister() {
  // 1. We may have already spilled to get a temp register.
  if (spilled_register_ >= 0) {
    return Register::FromAllocationIndex(spilled_register_);
  }

  // 2. We may have a free register that we can use without spilling.
  Register free = GetFreeRegisterNot(no_reg);
  if (!free.is(no_reg)) return free;

  // 3. Prefer to spill a register that is not used in any remaining move
  // because it will not need to be restored until the end.
  for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
    if (source_uses_[i] == 0 && destination_uses_[i] == 0) {
      Register scratch = Register::FromAllocationIndex(i);
      __ push(scratch);
      spilled_register_ = i;
      return scratch;
    }
  }

  // 4. Use an arbitrary register.  Register 0 is as arbitrary as any other.
  Register scratch = Register::FromAllocationIndex(0);
  __ push(scratch);
  spilled_register_ = 0;
  return scratch;
}

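// A note on the push-based spill above: pushing moves esp, but the Operands
// produced by cgen_->ToOperand() for stack slots are frame-pointer (ebp)
// relative on ia32, so outstanding memory operands remain valid while a
// register is spilled.  (This assumes frames with a saved frame pointer,
// which holds for Lithium-generated code.)
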
void LGapResolver::EmitMove(int index) {
  LOperand* source = moves_[index].source();
  LOperand* destination = moves_[index].destination();
  EnsureRestored(source);
  EnsureRestored(destination);

  // Dispatch on the source and destination operand kinds.  Not all
  // combinations are possible.
  if (source->IsRegister()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Register src = cgen_->ToRegister(source);
    Operand dst = cgen_->ToOperand(destination);
    __ mov(dst, src);

  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Operand src = cgen_->ToOperand(source);
    if (destination->IsRegister()) {
      Register dst = cgen_->ToRegister(destination);
      __ mov(dst, src);
    } else {
      // Spill on demand to use a temporary register for memory-to-memory
      // moves.
      Register tmp = EnsureTempRegister();
      Operand dst = cgen_->ToOperand(destination);
      __ mov(tmp, src);
      __ mov(dst, tmp);
    }

  } else if (source->IsConstantOperand()) {
    LConstantOperand* constant_source = LConstantOperand::cast(source);
    if (destination->IsRegister()) {
      Register dst = cgen_->ToRegister(destination);
      Representation r = cgen_->IsSmi(constant_source)
          ? Representation::Smi() : Representation::Integer32();
      if (cgen_->IsInteger32(constant_source)) {
        __ Move(dst, cgen_->ToImmediate(constant_source, r));
      } else {
        __ LoadObject(dst, cgen_->ToHandle(constant_source));
      }
    } else if (destination->IsDoubleRegister()) {
      // Materialize the double constant through the stack: push the upper
      // then the lower 32 bits so the little-endian 64-bit value sits at
      // [esp], load it, and pop it back off.
      double v = cgen_->ToDouble(constant_source);
      uint64_t int_val = bit_cast<uint64_t, double>(v);
      int32_t lower = static_cast<int32_t>(int_val);
      int32_t upper = static_cast<int32_t>(int_val >> kBitsPerInt);
      XMMRegister dst = cgen_->ToDoubleRegister(destination);
      if (int_val == 0) {
        __ xorps(dst, dst);
      } else {
        __ push(Immediate(upper));
        __ push(Immediate(lower));
        __ movsd(dst, Operand(esp, 0));
        __ add(esp, Immediate(kDoubleSize));
      }
    } else {
      DCHECK(destination->IsStackSlot());
      Operand dst = cgen_->ToOperand(destination);
      Representation r = cgen_->IsSmi(constant_source)
          ? Representation::Smi() : Representation::Integer32();
      if (cgen_->IsInteger32(constant_source)) {
        __ Move(dst, cgen_->ToImmediate(constant_source, r));
      } else {
        Register tmp = EnsureTempRegister();
        __ LoadObject(tmp, cgen_->ToHandle(constant_source));
        __ mov(dst, tmp);
      }
    }

  } else if (source->IsDoubleRegister()) {
    XMMRegister src = cgen_->ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      XMMRegister dst = cgen_->ToDoubleRegister(destination);
      __ movaps(dst, src);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      Operand dst = cgen_->ToOperand(destination);
      __ movsd(dst, src);
    }

  } else if (source->IsDoubleStackSlot()) {
    DCHECK(destination->IsDoubleRegister() ||
           destination->IsDoubleStackSlot());
    Operand src = cgen_->ToOperand(source);
    if (destination->IsDoubleRegister()) {
      XMMRegister dst = cgen_->ToDoubleRegister(destination);
      __ movsd(dst, src);
    } else {
      // Double-width memory-to-memory move; xmm0 serves as scratch.
      Operand dst = cgen_->ToOperand(destination);
      __ movsd(xmm0, src);
      __ movsd(dst, xmm0);
    }

  } else {
    UNREACHABLE();
  }

  RemoveMove(index);
}

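// Supported EmitMove combinations, as enforced by the DCHECKs above:
//   register          -> register, stack slot
//   stack slot        -> register, stack slot (via a temp register)
//   constant          -> register, double register, stack slot
//   double register   -> double register, double stack slot
//   double stack slot -> double register, double stack slot (via xmm0)
// Anything else falls through to UNREACHABLE().
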
void LGapResolver::EmitSwap(int index) {
  LOperand* source = moves_[index].source();
  LOperand* destination = moves_[index].destination();
  EnsureRestored(source);
  EnsureRestored(destination);

  // Dispatch on the source and destination operand kinds.  Not all
  // combinations are possible.
  if (source->IsRegister() && destination->IsRegister()) {
    // Register-register.
    Register src = cgen_->ToRegister(source);
    Register dst = cgen_->ToRegister(destination);
    __ xchg(dst, src);

  } else if ((source->IsRegister() && destination->IsStackSlot()) ||
             (source->IsStackSlot() && destination->IsRegister())) {
    // Register-memory.  Use a free register as a temp if possible.  Do not
    // spill on demand because the simple spill implementation cannot avoid
    // spilling src at this point.
    Register tmp = GetFreeRegisterNot(no_reg);
    Register reg =
        cgen_->ToRegister(source->IsRegister() ? source : destination);
    Operand mem =
        cgen_->ToOperand(source->IsRegister() ? destination : source);
    if (tmp.is(no_reg)) {
      // No free register: swap in place with the three-xor trick.
      __ xor_(reg, mem);
      __ xor_(mem, reg);
      __ xor_(reg, mem);
    } else {
      __ mov(tmp, mem);
      __ mov(mem, reg);
      __ mov(reg, tmp);
    }

  } else if (source->IsStackSlot() && destination->IsStackSlot()) {
    // Memory-memory.  Spill on demand to use a temporary; if a second free
    // register is available, a plain four-move sequence is cheaper.
    Register tmp0 = EnsureTempRegister();
    Register tmp1 = GetFreeRegisterNot(tmp0);
    Operand src = cgen_->ToOperand(source);
    Operand dst = cgen_->ToOperand(destination);
    if (tmp1.is(no_reg)) {
      // Only one temp register: load dst, swap it with src in place via
      // xors, then store it back to dst.
      __ mov(tmp0, dst);
      __ xor_(tmp0, src);
      __ xor_(src, tmp0);
      __ xor_(tmp0, src);
      __ mov(dst, tmp0);
    } else {
      __ mov(tmp0, dst);
      __ mov(tmp1, src);
      __ mov(dst, tmp1);
      __ mov(src, tmp0);
    }

  } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
    // XMM register-register swap.  We rely on having xmm0 available as a
    // fixed scratch register.
    XMMRegister src = cgen_->ToDoubleRegister(source);
    XMMRegister dst = cgen_->ToDoubleRegister(destination);
    __ movaps(xmm0, src);
    __ movaps(src, dst);
    __ movaps(dst, xmm0);

  } else if (source->IsDoubleRegister() || destination->IsDoubleRegister()) {
    // XMM register-memory swap.  We rely on having xmm0 available as a
    // fixed scratch register.
    DCHECK(source->IsDoubleStackSlot() || destination->IsDoubleStackSlot());
    XMMRegister reg = cgen_->ToDoubleRegister(source->IsDoubleRegister()
                                                  ? source
                                                  : destination);
    Operand other =
        cgen_->ToOperand(source->IsDoubleRegister() ? destination : source);
    __ movsd(xmm0, other);
    __ movsd(other, reg);
    __ movaps(reg, xmm0);

  } else if (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot()) {
    // Double-width memory-to-memory.  Spill on demand to use a general
    // purpose temporary register and also rely on having xmm0 available as
    // a fixed scratch register.
    Register tmp = EnsureTempRegister();
    Operand src0 = cgen_->ToOperand(source);
    Operand src1 = cgen_->HighOperand(source);
    Operand dst0 = cgen_->ToOperand(destination);
    Operand dst1 = cgen_->HighOperand(destination);
    __ movsd(xmm0, dst0);  // Save destination in xmm0.
    __ mov(tmp, src0);     // Then copy source to destination word by word.
    __ mov(dst0, tmp);
    __ mov(tmp, src1);
    __ mov(dst1, tmp);
    __ movsd(src0, xmm0);  // Finally store the saved destination to source.

  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }

  // The swap of source and destination has executed a move from source to
  // destination.
  RemoveMove(index);

  // Any unperformed (including pending) move with a source of either this
  // move's source or destination needs its source changed to reflect the
  // state of affairs after the swap.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(source)) {
      moves_[i].set_source(destination);
    } else if (other_move.Blocks(destination)) {
      moves_[i].set_source(source);
    }
  }

  // In addition to swapping the actual uses as sources, we need to update
  // the use counts.
  if (source->IsRegister() && destination->IsRegister()) {
    int temp = source_uses_[source->index()];
    source_uses_[source->index()] = source_uses_[destination->index()];
    source_uses_[destination->index()] = temp;
  } else if (source->IsRegister()) {
    // The register's use count is stale after the swap; recompute it.
    source_uses_[source->index()] = CountSourceUses(source);
  } else if (destination->IsRegister()) {
    source_uses_[destination->index()] = CountSourceUses(destination);
  }
}

#undef __

}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_IA32