LGapResolver::LGapResolver(LCodeGen* owner)
    : cgen_(owner), moves_(32, owner->zone()) {}
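

// A Lithium gap encodes a parallel move: conceptually, every move in the
// set reads its source before any move writes a destination.  The resolver
// serializes them.  For example, the pair { rax = rbx; rbx = rax } cannot
// be emitted as two sequential movs; it is a cycle and must become a swap.
// An acyclic chain such as { rax = rbx; rbx = [rsp+8] } merely has to be
// ordered so that rbx is read before it is overwritten.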
void LGapResolver::Resolve(LParallelMove* parallel_move) {
  DCHECK(moves_.is_empty());
  // Build up a worklist of moves.
  BuildInitialMoveList(parallel_move);

  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands move = moves_[i];
    // Skip constants to perform them last.  They don't block other moves
    // and skipping such moves with register destinations keeps those
    // registers free for the whole algorithm.
    if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
      PerformMove(i);
    }
  }

  // Perform the moves with constant sources.
  for (int i = 0; i < moves_.length(); ++i) {
    if (!moves_[i].IsEliminated()) {
      DCHECK(moves_[i].source()->IsConstantOperand());
      EmitMove(i);
    }
  }

  moves_.Rewind(0);
}
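

// Note: moves_ is a ZoneList worklist living in the owning LCodeGen's
// Zone (see the constructor above), so building and resolving a gap
// allocates only zone memory, and Rewind(0) simply resets the list's
// length for the next gap.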
void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
  // Perform a linear sweep of the moves to add them to the initial list of
  // moves to perform, ignoring any move that is redundant (the source is
  // the same as the destination, the destination is ignored and
  // unallocated, or the move was already eliminated).
  const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
  for (int i = 0; i < moves->length(); ++i) {
    LMoveOperands move = moves->at(i);
    if (!move.IsRedundant()) moves_.Add(move, cgen_->zone());
  }
  Verify();
}
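

// Worked example of the resolution strategy used by PerformMove below, for
// the cycle rax = rbx, rbx = rcx, rcx = rax: the depth-first walk bottoms
// out at the innermost blocked move and swaps rbx and rcx; the fix-up loop
// in EmitSwap then redirects the still-pending first move's source to rcx.
// The next level up swaps rax and rcx and redirects that source once more,
// to rax.  What remains is rax = rax, which the Equals check below
// eliminates.  A cycle of length n thus costs n - 1 swaps.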
void LGapResolver::PerformMove(int index) {
  // Each call to this function performs a move and deletes it from the move
  // graph.  We first recursively perform any move blocking this one.  We
  // mark a move as "pending" on entry to PerformMove in order to detect
  // cycles in the move graph.  We use operand swaps to resolve cycles,
  // which means that a call to PerformMove could change any source operand
  // in the move graph.

  DCHECK(!moves_[index].IsPending());
  DCHECK(!moves_[index].IsRedundant());

  // Clear this move's destination to indicate a pending move.  The actual
  // destination is saved in a stack-allocated local.  Recursion may allow
  // multiple moves to be pending.
  DCHECK(moves_[index].source() != NULL);  // Or else it will look eliminated.
  LOperand* destination = moves_[index].destination();
  moves_[index].set_destination(NULL);

  // Perform a depth-first traversal of the move graph to resolve
  // dependencies.  Any unperformed, unpending move with a source the same
  // as this one's destination blocks this one so recursively perform all
  // such moves.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(destination) && !other_move.IsPending()) {
      // Though PerformMove can change any source operand in the move graph,
      // this call cannot create a blocking move via a swap (this loop does
      // not miss any).  Assume there is a non-blocking move with source A
      // and this move is blocked on source B and there is a swap of A and
      // B.  Then A and B must be involved in the same cycle (or they would
      // not be swapped).  Since this move's destination is B and there is
      // only a single incoming edge to an operand, this move must also be
      // involved in the same cycle.  In that case, the blocking move will
      // be created but will be "pending" when we return from PerformMove.
      PerformMove(i);
    }
  }

  // We are about to resolve this move and don't need it marked as
  // pending, so restore its destination.
  moves_[index].set_destination(destination);

  // This move's source may have changed due to swaps to resolve cycles and
  // so it may now be the last move in the cycle.  If so remove it.
  if (moves_[index].source()->Equals(destination)) {
    moves_[index].Eliminate();
    return;
  }

  // The move may be blocked on a (at most one) pending move, in which case
  // we have a cycle.  Search for such a blocking move and perform a swap to
  // resolve it.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(destination)) {
      DCHECK(other_move.IsPending());
      EmitSwap(index);
      return;
    }
  }

  // This move is not blocked.
  EmitMove(index);
}
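

// With a well-formed parallel move, no two entries share a destination;
// the cycle-breaking logic above relies on each operand having at most one
// incoming edge in the move graph.  On slow-DCHECK builds, Verify rechecks
// that invariant over the whole worklist.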
void LGapResolver::Verify() {
#ifdef ENABLE_SLOW_DCHECKS
  for (int i = 0; i < moves_.length(); ++i) {
    LOperand* destination = moves_[i].destination();
    for (int j = i + 1; j < moves_.length(); ++j) {
      SLOW_DCHECK(!destination->Equals(moves_[j].destination()));
    }
  }
#endif
}


#define __ ACCESS_MASM(cgen_->masm())
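
// The __ shorthand expands each masm directive through ACCESS_MASM on the
// owning LCodeGen's MacroAssembler (the convention used throughout the
// Lithium backends), so the __-prefixed lines below emit code directly
// into the function being compiled.
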
void LGapResolver::EmitMove(int index) {
  LOperand* source = moves_[index].source();
  LOperand* destination = moves_[index].destination();

  // Dispatch on the source and destination operand kinds.  Not all
  // combinations are possible.
  if (source->IsRegister()) {
    Register src = cgen_->ToRegister(source);
    if (destination->IsRegister()) {
      Register dst = cgen_->ToRegister(destination);
      __ movp(dst, src);
    } else {
      DCHECK(destination->IsStackSlot());
      Operand dst = cgen_->ToOperand(destination);
      __ movp(dst, src);
    }

  } else if (source->IsStackSlot()) {
    Operand src = cgen_->ToOperand(source);
    if (destination->IsRegister()) {
      Register dst = cgen_->ToRegister(destination);
      __ movp(dst, src);
    } else {
      DCHECK(destination->IsStackSlot());
      Operand dst = cgen_->ToOperand(destination);
      // Memory-to-memory moves go through the scratch register.
      __ movp(kScratchRegister, src);
      __ movp(dst, kScratchRegister);
    }

  } else if (source->IsConstantOperand()) {
    LConstantOperand* constant_source = LConstantOperand::cast(source);
    if (destination->IsRegister()) {
      Register dst = cgen_->ToRegister(destination);
      if (cgen_->IsSmiConstant(constant_source)) {
        __ Move(dst, cgen_->ToSmi(constant_source));
      } else if (cgen_->IsInteger32Constant(constant_source)) {
        int32_t constant = cgen_->ToInteger32(constant_source);
        // Do sign extension only for constants used as de-hoisted array
        // keys; all others need only zero extension, which saves two bytes.
        if (cgen_->IsDehoistedKeyConstant(constant_source)) {
          __ Set(dst, constant);
        } else {
          __ Set(dst, static_cast<uint32_t>(constant));
        }
      } else {
        __ Move(dst, cgen_->ToHandle(constant_source));
      }
    } else if (destination->IsDoubleRegister()) {
      double v = cgen_->ToDouble(constant_source);
      uint64_t int_val = bit_cast<uint64_t, double>(v);
      XMMRegister dst = cgen_->ToDoubleRegister(destination);
      if (int_val == 0) {
        // A zero double is cheaper to materialize with xorps than with a
        // 64-bit immediate load.
        __ xorps(dst, dst);
      } else {
        __ Set(kScratchRegister, int_val);
        __ movq(dst, kScratchRegister);
      }
    } else {
      DCHECK(destination->IsStackSlot());
      Operand dst = cgen_->ToOperand(destination);
      if (cgen_->IsSmiConstant(constant_source)) {
        __ Move(dst, cgen_->ToSmi(constant_source));
      } else if (cgen_->IsInteger32Constant(constant_source)) {
        // movp with a 32-bit immediate sign-extends to 64 bits.
        __ movp(dst, Immediate(cgen_->ToInteger32(constant_source)));
      } else {
        __ Move(kScratchRegister, cgen_->ToHandle(constant_source));
        __ movp(dst, kScratchRegister);
      }
    }

  } else if (source->IsDoubleRegister()) {
    XMMRegister src = cgen_->ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      __ movaps(cgen_->ToDoubleRegister(destination), src);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      __ movsd(cgen_->ToOperand(destination), src);
    }
  } else if (source->IsDoubleStackSlot()) {
    Operand src = cgen_->ToOperand(source);
    if (destination->IsDoubleRegister()) {
      __ movsd(cgen_->ToDoubleRegister(destination), src);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      // Stack-to-stack double moves are staged through xmm0.
      __ movsd(xmm0, src);
      __ movsd(cgen_->ToOperand(destination), xmm0);
    }
  } else {
    UNREACHABLE();
  }

  moves_[index].Eliminate();
}
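

// EmitSwap is reached only from the cycle-breaking path in PerformMove.
// Exchanging the two operands satisfies the current move and, for a
// two-element cycle, its partner as well; longer cycles are peeled one
// swap at a time, with the loop at the end of this function rewriting the
// sources of the moves still outstanding.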
void LGapResolver::EmitSwap(int index) {
  LOperand* source = moves_[index].source();
  LOperand* destination = moves_[index].destination();

  // Dispatch on the source and destination operand kinds.  Not all
  // combinations are possible.
  if (source->IsRegister() && destination->IsRegister()) {
    // Swap two general-purpose registers.
    Register src = cgen_->ToRegister(source);
    Register dst = cgen_->ToRegister(destination);
    __ xchgq(dst, src);

  } else if ((source->IsRegister() && destination->IsStackSlot()) ||
             (source->IsStackSlot() && destination->IsRegister())) {
    // Swap a general-purpose register and a stack slot.
    Register reg =
        cgen_->ToRegister(source->IsRegister() ? source : destination);
    Operand mem =
        cgen_->ToOperand(source->IsRegister() ? destination : source);
    __ movp(kScratchRegister, mem);
    __ movp(mem, reg);
    __ movp(reg, kScratchRegister);

  } else if ((source->IsStackSlot() && destination->IsStackSlot()) ||
      (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot())) {
    // Swap two stack slots or two double stack slots.
    Operand src = cgen_->ToOperand(source);
    Operand dst = cgen_->ToOperand(destination);
    __ movsd(xmm0, src);
    __ movp(kScratchRegister, dst);
    __ movsd(dst, xmm0);
    __ movp(src, kScratchRegister);

  } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
    // Swap two double registers.
    XMMRegister source_reg = cgen_->ToDoubleRegister(source);
    XMMRegister destination_reg = cgen_->ToDoubleRegister(destination);
    __ movaps(xmm0, source_reg);
    __ movaps(source_reg, destination_reg);
    __ movaps(destination_reg, xmm0);

  } else if (source->IsDoubleRegister() || destination->IsDoubleRegister()) {
    // Swap a double register and a double stack slot.
    DCHECK((source->IsDoubleRegister() && destination->IsDoubleStackSlot()) ||
           (source->IsDoubleStackSlot() && destination->IsDoubleRegister()));
    XMMRegister reg = cgen_->ToDoubleRegister(source->IsDoubleRegister()
                                                  ? source
                                                  : destination);
    LOperand* other = source->IsDoubleRegister() ? destination : source;
    DCHECK(other->IsDoubleStackSlot());
    Operand other_operand = cgen_->ToOperand(other);
    __ movsd(xmm0, other_operand);
    __ movsd(other_operand, reg);
    __ movaps(reg, xmm0);

  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }

  // The swap of source and destination has executed a move from source to
  // destination.
  moves_[index].Eliminate();

  // Any unperformed (including pending) move with a source of either
  // source or destination needs to have its source changed to reflect the
  // state of affairs after the swap.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(source)) {
      moves_[i].set_source(destination);
    } else if (other_move.Blocks(destination)) {
      moves_[i].set_source(source);
    }
  }
}

#undef __