LGapResolver::LGapResolver(LCodeGen* owner)
    : cgen_(owner),
      moves_(32, owner->zone()),
      root_index_(0),
      in_cycle_(false),
      saved_destination_(NULL) {}
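
// The gap resolver turns a parallel move into a sequence of single moves.
// Resolve() first performs every move whose source is not a constant,
// breaking any cycle it finds via a scratch register, and then emits the
// constant-source moves, which can never block another move.
//
// Example: the two moves {a0 <- a1, a1 <- a0} form a cycle.  PerformMove
// reaches the pending root move again, BreakCycle spills a0 into
// kLithiumScratchReg, the move a0 <- a1 is emitted, and RestoreValue finally
// writes the spilled value into a1.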
void LGapResolver::Resolve(LParallelMove* parallel_move) {
  // Build up a worklist of moves.
  BuildInitialMoveList(parallel_move);

  // Perform the moves with non-constant sources first; constant sources
  // cannot block other moves, so they are emitted last.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands move = moves_[i];
    if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
      root_index_ = i;  // Any cycle is found by reaching this move again.
      PerformMove(i);
      if (in_cycle_) RestoreValue();
    }
  }

  // Perform the moves with constant sources.
  for (int i = 0; i < moves_.length(); ++i) {
    if (!moves_[i].IsEliminated()) {
      DCHECK(moves_[i].source()->IsConstantOperand());
      EmitMove(i);
    }
  }

  moves_.Rewind(0);
}

void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
  // Perform a linear sweep of the moves, adding each one to the worklist
  // unless it is redundant.
  const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
  for (int i = 0; i < moves->length(); ++i) {
    LMoveOperands move = moves->at(i);
    if (!move.IsRedundant()) moves_.Add(move, cgen_->zone());
  }
  Verify();
}
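
// Each call to PerformMove performs one move and removes it from the move
// graph.  Moves blocking this one (moves whose source is this move's
// destination) are performed first, recursively.  A move is marked as
// pending by clearing its destination while it is being processed; reaching
// the pending root move again means the move graph contains a cycle, which
// is broken by spilling the blocked move's source into a scratch register.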
void LGapResolver::PerformMove(int index) {
  DCHECK(!moves_[index].IsPending());
  DCHECK(!moves_[index].IsRedundant());

  // Clear this move's destination to mark it as pending; the actual
  // destination is kept in a local so it can be restored below.
  LOperand* destination = moves_[index].destination();
  moves_[index].set_destination(NULL);

  // Recursively perform every non-pending move that blocks this one.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(destination) && !other_move.IsPending()) {
      PerformMove(i);
    }
  }

  moves_[index].set_destination(destination);

  // If this move is still blocked, the blocker must be the pending root
  // move: we have found a cycle.
  LMoveOperands other_move = moves_[root_index_];
  if (other_move.Blocks(destination)) {
    DCHECK(other_move.IsPending());
    BreakCycle(index);
    return;
  }

  // This move is no longer blocked.
  EmitMove(index);
}

void LGapResolver::Verify() {
#ifdef ENABLE_SLOW_DCHECKS
  // No operand should be the destination for more than one move.
  for (int i = 0; i < moves_.length(); ++i) {
    LOperand* destination = moves_[i].destination();
    for (int j = i + 1; j < moves_.length(); ++j) {
      SLOW_DCHECK(!destination->Equals(moves_[j].destination()));
    }
  }
#endif
}

#define __ ACCESS_MASM(cgen_->masm())
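
// BreakCycle spills the value needed as the pending root move's source into
// kLithiumScratchReg (general-purpose values) or kLithiumScratchDouble
// (doubles).  The cycle's remaining moves can then be emitted normally, and
// RestoreValue() later writes the spilled value into its destination.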
void LGapResolver::BreakCycle(int index) {
  DCHECK(moves_[index].destination()->Equals(moves_[root_index_].source()));
  DCHECK(!in_cycle_);
  in_cycle_ = true;
  LOperand* source = moves_[index].source();
  saved_destination_ = moves_[index].destination();
  if (source->IsRegister()) {
    __ mov(kLithiumScratchReg, cgen_->ToRegister(source));
  } else if (source->IsStackSlot()) {
    __ lw(kLithiumScratchReg, cgen_->ToMemOperand(source));
  } else if (source->IsDoubleRegister()) {
    __ mov_d(kLithiumScratchDouble, cgen_->ToDoubleRegister(source));
  } else if (source->IsDoubleStackSlot()) {
    __ ldc1(kLithiumScratchDouble, cgen_->ToMemOperand(source));
  } else {
    UNREACHABLE();
  }
  // This move will be done by restoring the saved value to the destination.
  moves_[index].Eliminate();
}
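
// RestoreValue completes the move eliminated by BreakCycle: the value saved
// in the scratch register is written to the saved destination.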
void LGapResolver::RestoreValue() {
  DCHECK(in_cycle_);
  DCHECK(saved_destination_ != NULL);

  if (saved_destination_->IsRegister()) {
    __ mov(cgen_->ToRegister(saved_destination_), kLithiumScratchReg);
  } else if (saved_destination_->IsStackSlot()) {
    __ sw(kLithiumScratchReg, cgen_->ToMemOperand(saved_destination_));
  } else if (saved_destination_->IsDoubleRegister()) {
    __ mov_d(cgen_->ToDoubleRegister(saved_destination_),
             kLithiumScratchDouble);
  } else if (saved_destination_->IsDoubleStackSlot()) {
    __ sdc1(kLithiumScratchDouble,
            cgen_->ToMemOperand(saved_destination_));
  } else {
    UNREACHABLE();
  }

  in_cycle_ = false;
  saved_destination_ = NULL;
}
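
// EmitMove dispatches on the kind of the source operand (register, stack
// slot, constant, double register, double stack slot), emits the
// corresponding register move or load/store, and marks the move eliminated.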
void LGapResolver::EmitMove(int index) {
  LOperand* source = moves_[index].source();
  LOperand* destination = moves_[index].destination();

  if (source->IsRegister()) {
    Register source_register = cgen_->ToRegister(source);
    if (destination->IsRegister()) {
      __ mov(cgen_->ToRegister(destination), source_register);
    } else {
      DCHECK(destination->IsStackSlot());
      __ sw(source_register, cgen_->ToMemOperand(destination));
    }
  } else if (source->IsStackSlot()) {
    MemOperand source_operand = cgen_->ToMemOperand(source);
    if (destination->IsRegister()) {
      __ lw(cgen_->ToRegister(destination), source_operand);
    } else {
      DCHECK(destination->IsStackSlot());
      MemOperand destination_operand = cgen_->ToMemOperand(destination);
      if (in_cycle_) {
        if (!destination_operand.OffsetIsInt16Encodable()) {
          // 'at' is needed to materialize a large destination offset, so the
          // value itself goes through the double scratch register instead.
          __ lwc1(kLithiumScratchDouble, source_operand);
          __ swc1(kLithiumScratchDouble, destination_operand);
        } else {
          __ lw(at, source_operand);
          __ sw(at, destination_operand);
        }
      } else {
        // Outside a cycle the scratch register is free.
        __ lw(kLithiumScratchReg, source_operand);
        __ sw(kLithiumScratchReg, destination_operand);
      }
    }
  } else if (source->IsConstantOperand()) {
    LConstantOperand* constant_source = LConstantOperand::cast(source);
    if (destination->IsRegister()) {
      Register dst = cgen_->ToRegister(destination);
      Representation r = cgen_->IsSmi(constant_source)
          ? Representation::Smi() : Representation::Integer32();
      if (cgen_->IsInteger32(constant_source)) {
        __ li(dst, Operand(cgen_->ToRepresentation(constant_source, r)));
      } else {
        __ li(dst, cgen_->ToHandle(constant_source));
      }
    } else if (destination->IsDoubleRegister()) {
      DoubleRegister result = cgen_->ToDoubleRegister(destination);
      double v = cgen_->ToDouble(constant_source);
      __ Move(result, v);
    } else {
      DCHECK(destination->IsStackSlot());
      Representation r = cgen_->IsSmi(constant_source)
          ? Representation::Smi() : Representation::Integer32();
      if (cgen_->IsInteger32(constant_source)) {
        __ li(kLithiumScratchReg,
              Operand(cgen_->ToRepresentation(constant_source, r)));
      } else {
        __ li(kLithiumScratchReg, cgen_->ToHandle(constant_source));
      }
      __ sw(kLithiumScratchReg, cgen_->ToMemOperand(destination));
    }
  } else if (source->IsDoubleRegister()) {
    DoubleRegister source_register = cgen_->ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      __ mov_d(cgen_->ToDoubleRegister(destination), source_register);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      MemOperand destination_operand = cgen_->ToMemOperand(destination);
      __ sdc1(source_register, destination_operand);
    }
  } else if (source->IsDoubleStackSlot()) {
    MemOperand source_operand = cgen_->ToMemOperand(source);
    if (destination->IsDoubleRegister()) {
      __ ldc1(cgen_->ToDoubleRegister(destination), source_operand);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      MemOperand destination_operand = cgen_->ToMemOperand(destination);
      if (in_cycle_) {
        // kLithiumScratchDouble was used to break the cycle, but
        // kLithiumScratchReg is free: copy the slot word by word.
        MemOperand source_high_operand = cgen_->ToHighMemOperand(source);
        MemOperand destination_high_operand =
            cgen_->ToHighMemOperand(destination);
        __ lw(kLithiumScratchReg, source_operand);
        __ sw(kLithiumScratchReg, destination_operand);
        __ lw(kLithiumScratchReg, source_high_operand);
        __ sw(kLithiumScratchReg, destination_high_operand);
      } else {
        __ ldc1(kLithiumScratchDouble, source_operand);
        __ sdc1(kLithiumScratchDouble, destination_operand);
      }
    }
  } else {
    UNREACHABLE();
  }

  moves_[index].Eliminate();
}

#undef __