#define kSavedValueRegister kRootRegister
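// Note: the root register doubles as the spill register for breaking move
// cycles. It is safe to clobber here because it is not in the allocatable
// register set, so no pending move can use it, and it is reloaded afterwards
// via InitializeRootRegister() instead of being pushed on the stack.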
LGapResolver::LGapResolver(LCodeGen* owner)
    : cgen_(owner),
      moves_(32, owner->zone()),
      root_index_(0),
      in_cycle_(false),
      saved_destination_(NULL),
      need_to_restore_root_(false) {}
#define __ ACCESS_MASM(cgen_->masm())
void LGapResolver::Resolve(LParallelMove* parallel_move) {
  DCHECK(moves_.is_empty());
  // Build up a worklist of moves.
  BuildInitialMoveList(parallel_move);

  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands move = moves_[i];
    // Skip constants to perform them last.  They don't block other moves and
    // skipping such moves with register destinations keeps those registers
    // free for the whole algorithm.
    if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
      root_index_ = i;  // Any cycle is found by reaching this move again.
      PerformMove(i);
      if (in_cycle_) RestoreValue();
    }
  }
  // Perform the moves with constant sources last.
  for (int i = 0; i < moves_.length(); ++i) {
    if (!moves_[i].IsEliminated()) {
      DCHECK(moves_[i].source()->IsConstantOperand());
      EmitMove(i);
    }
  }
  if (need_to_restore_root_) {
    __ InitializeRootRegister();
    need_to_restore_root_ = false;
  }

  moves_.Rewind(0);
}
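
// Worked example (sketch): the parallel move [r0 -> r1, r1 -> r0] is a
// two-element cycle.  PerformMove(0) recurses into move 1, which is blocked
// by the pending root move, so BreakCycle(1) spills its source and the
// emitted code is roughly:
//
//   mov kSavedValueRegister, r1  ; BreakCycle saves the blocked source
//   mov r1, r0                   ; the now-unblocked root move
//   mov r0, kSavedValueRegister  ; RestoreValue completes the cycle
//
// followed by InitializeRootRegister(), since kSavedValueRegister aliases
// the root register.
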
void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
  // Perform a linear sweep of the moves to add them to the initial list of
  // moves to perform, ignoring any move that is redundant (the source is the
  // same as the destination, the destination is ignored, or the move was
  // already eliminated).
  const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
  for (int i = 0; i < moves->length(); ++i) {
    LMoveOperands move = moves->at(i);
    if (!move.IsRedundant()) moves_.Add(move, cgen_->zone());
  }
  Verify();
}
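
// Filtering redundant moves up front (e.g. [r0 -> r0]) keeps the worklist
// small; only moves that actually transfer a value take part in the cycle
// detection below.
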
void LGapResolver::PerformMove(int index) {
  // Each call to this function performs a move and deletes it from the move
  // graph.  We first recursively perform any move blocking this one.  A move
  // is marked as "pending" on entry to PerformMove in order to detect cycles
  // in the move graph: a cycle exists exactly when the depth-first traversal
  // reaches the starting move again, and it is broken by spilling the source
  // of the starting move.
  DCHECK(!moves_[index].IsPending());
  DCHECK(!moves_[index].IsRedundant());

  // Clear this move's destination to indicate a pending move.  The actual
  // destination is saved in a stack allocated local.  Multiple moves can be
  // pending because this function is recursive.
  LOperand* destination = moves_[index].destination();
  moves_[index].set_destination(NULL);
  // Perform a depth-first traversal of the move graph to resolve
  // dependencies.  Any unperformed, unpending move with a source the same as
  // this one's destination blocks this one, so perform all such moves first.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(destination) && !other_move.IsPending()) {
      PerformMove(i);
    }
  }

  // We are about to resolve this move and don't need it marked as pending,
  // so restore its destination.
  moves_[index].set_destination(destination);
  // The move may be blocked on a pending move, which must be the starting
  // move.  In this case we have a cycle, and we save the source of this move
  // to break it.
  LMoveOperands other_move = moves_[root_index_];
  if (other_move.Blocks(destination)) {
    DCHECK(other_move.IsPending());
    BreakCycle(index);
    return;
  }

  // This move is no longer blocked.
  EmitMove(index);
}
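
// The recursion terminates: each call either eliminates its move via
// EmitMove() or defers it to BreakCycle()/RestoreValue(), and a move is
// never visited while pending, so no dependency chain is walked twice.
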
void LGapResolver::Verify() {
#ifdef ENABLE_SLOW_DCHECKS
  // No operand should be the destination of more than one move.
  for (int i = 0; i < moves_.length(); ++i) {
    LOperand* destination = moves_[i].destination();
    for (int j = i + 1; j < moves_.length(); ++j) {
      SLOW_DCHECK(!destination->Equals(moves_[j].destination()));
    }
  }
#endif
}
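
// This checks the defining invariant of a parallel move: all moves happen
// "at once", so writing two different values to the same destination would
// be meaningless.
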
void LGapResolver::BreakCycle(int index) {
  // We save the value of the source of this move in a spill location and
  // remember its destination.  Then we mark this move as resolved so the
  // cycle is broken and we perform the other moves in the cycle.
  DCHECK(moves_[index].destination()->Equals(moves_[root_index_].source()));
  DCHECK(!in_cycle_);
  in_cycle_ = true;

  LOperand* source = moves_[index].source();
  saved_destination_ = moves_[index].destination();
  if (source->IsRegister()) {
    need_to_restore_root_ = true;
    __ mov(kSavedValueRegister, cgen_->ToRegister(source));
  } else if (source->IsStackSlot()) {
    need_to_restore_root_ = true;
    __ ldr(kSavedValueRegister, cgen_->ToMemOperand(source));
  } else if (source->IsDoubleRegister()) {
    __ vmov(kScratchDoubleReg, cgen_->ToDoubleRegister(source));
  } else if (source->IsDoubleStackSlot()) {
    __ vldr(kScratchDoubleReg, cgen_->ToMemOperand(source));
  } else {
    UNREACHABLE();
  }
  // This move will be done by restoring the saved value to the destination.
  moves_[index].Eliminate();
}
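
// Only the integer paths clobber the root register and therefore set
// need_to_restore_root_; double-width values are parked in kScratchDoubleReg,
// which needs no restoring.
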
void LGapResolver::RestoreValue() {
  DCHECK(in_cycle_);
  DCHECK(saved_destination_ != NULL);

  if (saved_destination_->IsRegister()) {
    __ mov(cgen_->ToRegister(saved_destination_), kSavedValueRegister);
  } else if (saved_destination_->IsStackSlot()) {
    __ str(kSavedValueRegister, cgen_->ToMemOperand(saved_destination_));
  } else if (saved_destination_->IsDoubleRegister()) {
    __ vmov(cgen_->ToDoubleRegister(saved_destination_), kScratchDoubleReg);
  } else if (saved_destination_->IsDoubleStackSlot()) {
    __ vstr(kScratchDoubleReg, cgen_->ToMemOperand(saved_destination_));
  } else {
    UNREACHABLE();
  }

  in_cycle_ = false;
  saved_destination_ = NULL;
}
void LGapResolver::EmitMove(int index) {
  LOperand* source = moves_[index].source();
  LOperand* destination = moves_[index].destination();

  // Dispatch on the source and destination operand kinds.  Not all
  // combinations are possible.
  if (source->IsRegister()) {
    Register source_register = cgen_->ToRegister(source);
    if (destination->IsRegister()) {
      __ mov(cgen_->ToRegister(destination), source_register);
    } else {
      DCHECK(destination->IsStackSlot());
      __ str(source_register, cgen_->ToMemOperand(destination));
    }
  } else if (source->IsStackSlot()) {
    MemOperand source_operand = cgen_->ToMemOperand(source);
    if (destination->IsRegister()) {
      __ ldr(cgen_->ToRegister(destination), source_operand);
    } else {
      DCHECK(destination->IsStackSlot());
      MemOperand destination_operand = cgen_->ToMemOperand(destination);
      if (!destination_operand.OffsetIsUint12Encodable()) {
        // ip is overwritten while saving the value to the destination.
        // Therefore we can't use ip.  It is OK if the read from the source
        // destroys ip, since that happens before the value is read.
        __ vldr(kScratchDoubleReg.low(), source_operand);
        __ vstr(kScratchDoubleReg.low(), destination_operand);
      } else {
        __ ldr(ip, source_operand);
        __ str(ip, destination_operand);
      }
    }
  } else if (source->IsConstantOperand()) {
    LConstantOperand* constant_source = LConstantOperand::cast(source);
    if (destination->IsRegister()) {
      Register dst = cgen_->ToRegister(destination);
      Representation r = cgen_->IsSmi(constant_source)
          ? Representation::Smi() : Representation::Integer32();
      if (cgen_->IsInteger32(constant_source)) {
        __ mov(dst, Operand(cgen_->ToRepresentation(constant_source, r)));
      } else {
        __ Move(dst, cgen_->ToHandle(constant_source));
      }
    } else if (destination->IsDoubleRegister()) {
      DwVfpRegister result = cgen_->ToDoubleRegister(destination);
      double v = cgen_->ToDouble(constant_source);
      __ Vmov(result, v, ip);
    } else {
      DCHECK(destination->IsStackSlot());
      DCHECK(!in_cycle_);  // Constant moves happen after all cycles are gone.
      need_to_restore_root_ = true;
      Representation r = cgen_->IsSmi(constant_source)
          ? Representation::Smi() : Representation::Integer32();
      if (cgen_->IsInteger32(constant_source)) {
        __ mov(kSavedValueRegister,
               Operand(cgen_->ToRepresentation(constant_source, r)));
      } else {
        __ Move(kSavedValueRegister, cgen_->ToHandle(constant_source));
      }
      __ str(kSavedValueRegister, cgen_->ToMemOperand(destination));
    }
  } else if (source->IsDoubleRegister()) {
    DwVfpRegister source_register = cgen_->ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      __ vmov(cgen_->ToDoubleRegister(destination), source_register);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      __ vstr(source_register, cgen_->ToMemOperand(destination));
    }
  } else if (source->IsDoubleStackSlot()) {
    MemOperand source_operand = cgen_->ToMemOperand(source);
    if (destination->IsDoubleRegister()) {
      __ vldr(cgen_->ToDoubleRegister(destination), source_operand);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      MemOperand destination_operand = cgen_->ToMemOperand(destination);
      if (in_cycle_) {
        // kScratchDoubleReg was used to break the cycle, so its value must
        // be preserved across this stack-to-stack copy.
        __ vstm(db_w, sp, kScratchDoubleReg, kScratchDoubleReg);
        __ vldr(kScratchDoubleReg, source_operand);
        __ vstr(kScratchDoubleReg, destination_operand);
        __ vldm(ia_w, sp, kScratchDoubleReg, kScratchDoubleReg);
      } else {
        __ vldr(kScratchDoubleReg, source_operand);
        __ vstr(kScratchDoubleReg, destination_operand);
      }
    }
  } else {
    UNREACHABLE();
  }

  moves_[index].Eliminate();
}
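
// Usage sketch (the driver lives in LCodeGen, not shown here): the code
// generator resolves each gap's parallel move in turn, e.g.
//
//   void LCodeGen::DoParallelMove(LParallelMove* move) {
//     resolver_.Resolve(move);
//   }
//
// Constant-source moves are deliberately emitted last by Resolve(), so a
// constant-to-stack-slot store may freely borrow kSavedValueRegister.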