#define __ ACCESS_MASM((&masm_))
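
// When the delayed masm finishes its delayed use, give back any register it
// borrowed: if the scratch register was used it aliases the root register,
// which must be reinitialized before normal code generation resumes.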
void DelayedGapMasm::EndDelayedUse() {
  DelayedMasm::EndDelayedUse();
  if (scratch_register_used()) {
    InitializeRootRegister();
    reset_scratch_register_used();
  }
}
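
// The resolver starts with an empty move list, no cycle in progress and no
// saved destination.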
LGapResolver::LGapResolver(LCodeGen* owner)
    : cgen_(owner), masm_(owner, owner->masm()), moves_(32, owner->zone()),
      root_index_(0), in_cycle_(false), saved_destination_(NULL) {
}
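
// Resolve() performs all the moves of one LParallelMove. Moves with
// non-constant sources are resolved first, following the dependency graph
// and breaking any cycle through a scratch register; moves with constant
// sources never block other moves and are emitted last.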
void LGapResolver::Resolve(LParallelMove* parallel_move) {
  // Build up a worklist of moves.
  BuildInitialMoveList(parallel_move);

  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands move = moves_[i];

    // Skip constants to perform them last. They don't block other moves and
    // skipping such moves with register destinations keeps those registers
    // free for the whole algorithm.
    if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
      root_index_ = i;  // Any cycle is found when we reach this move again.
      PerformMove(i);
      if (in_cycle_) RestoreValue();
    }
  }

  // Perform the moves with constant sources.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands move = moves_[i];
    if (!move.IsEliminated()) {
      DCHECK(move.source()->IsConstantOperand());
      EmitMove(i);
    }
  }

  __ EndDelayedUse();
  moves_.Rewind(0);
}
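
// Build the initial list of moves to resolve, ignoring any move that is
// already redundant (the source equals the destination, or the move has
// already been eliminated).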
void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
  const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
  for (int i = 0; i < moves->length(); ++i) {
    LMoveOperands move = moves->at(i);
    if (!move.IsRedundant()) moves_.Add(move, cgen_->zone());
  }
  Verify();
}
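
// Perform the move at the given index, first recursively performing any move
// that blocks it. A move is marked as pending (by clearing its destination)
// while its dependencies are processed; reaching the pending root move again
// means the moves form a cycle.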
void LGapResolver::PerformMove(int index) {
  LMoveOperands& current_move = moves_[index];

  DCHECK(!current_move.IsPending());
  DCHECK(!current_move.IsRedundant());

  // Clear this move's destination to mark it as pending; the actual
  // destination is kept in a local and restored below.
  LOperand* destination = current_move.destination();
  current_move.set_destination(NULL);

  // Depth-first traversal of the move graph: any unperformed, non-pending
  // move whose source is this move's destination blocks it, so perform all
  // such moves first.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(destination) && !other_move.IsPending()) {
      PerformMove(i);
    }
  }

  // We are about to resolve this move, so restore its destination.
  current_move.set_destination(destination);

  // If this move is still blocked, it can only be blocked by the pending
  // root move: we have found a cycle and break it via a scratch register.
  LMoveOperands other_move = moves_[root_index_];
  if (other_move.Blocks(destination)) {
    DCHECK(other_move.IsPending());
    BreakCycle(index);
    return;
  }

  // This move is no longer blocked.
  EmitMove(index);
}
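
// In slow-DCHECK builds, check that no operand is the destination of more
// than one move in the list.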
void LGapResolver::Verify() {
#ifdef ENABLE_SLOW_DCHECKS
  for (int i = 0; i < moves_.length(); ++i) {
    LOperand* destination = moves_[i].destination();
    for (int j = i + 1; j < moves_.length(); ++j) {
      SLOW_DCHECK(!destination->Equals(moves_[j].destination()));
    }
  }
#endif
}
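
// Break a cycle by saving the source of the root-blocking move in a scratch
// register (core or FP, depending on the operand type), remembering its
// destination, and eliminating the move. RestoreValue() later writes the
// saved value to that destination.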
void LGapResolver::BreakCycle(int index) {
  DCHECK(moves_[index].destination()->Equals(moves_[root_index_].source()));
  DCHECK(!in_cycle_);

  in_cycle_ = true;
  LOperand* source = moves_[index].source();
  saved_destination_ = moves_[index].destination();

  if (source->IsRegister()) {
    AcquireSavedValueRegister();
    __ Mov(SavedValueRegister(), cgen_->ToRegister(source));
  } else if (source->IsStackSlot()) {
    AcquireSavedValueRegister();
    __ Load(SavedValueRegister(), cgen_->ToMemOperand(source));
  } else if (source->IsDoubleRegister()) {
    __ Fmov(SavedFPValueRegister(), cgen_->ToDoubleRegister(source));
  } else if (source->IsDoubleStackSlot()) {
    __ Load(SavedFPValueRegister(), cgen_->ToMemOperand(source));
  } else {
    UNREACHABLE();
  }

  // This move will actually be performed by moving the saved value to its
  // destination in RestoreValue().
  moves_[index].Eliminate();
}
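
// Write the value saved by BreakCycle() to the destination remembered there,
// then leave cycle-resolution mode.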
void LGapResolver::RestoreValue() {
  DCHECK(in_cycle_);
  DCHECK(saved_destination_ != NULL);

  if (saved_destination_->IsRegister()) {
    __ Mov(cgen_->ToRegister(saved_destination_), SavedValueRegister());
    ReleaseSavedValueRegister();
  } else if (saved_destination_->IsStackSlot()) {
    __ Store(SavedValueRegister(), cgen_->ToMemOperand(saved_destination_));
    ReleaseSavedValueRegister();
  } else if (saved_destination_->IsDoubleRegister()) {
    __ Fmov(cgen_->ToDoubleRegister(saved_destination_),
            SavedFPValueRegister());
  } else if (saved_destination_->IsDoubleStackSlot()) {
    __ Store(SavedFPValueRegister(), cgen_->ToMemOperand(saved_destination_));
  } else {
    UNREACHABLE();
  }

  in_cycle_ = false;
  saved_destination_ = NULL;
}
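
// Emit the code for a single move, dispatching on the kinds of the source
// and destination operands. Not all combinations are possible.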
void LGapResolver::EmitMove(int index) {
  LOperand* source = moves_[index].source();
  LOperand* destination = moves_[index].destination();

  if (source->IsRegister()) {
    Register source_register = cgen_->ToRegister(source);
    if (destination->IsRegister()) {
      __ Mov(cgen_->ToRegister(destination), source_register);
    } else {
      DCHECK(destination->IsStackSlot());
      __ Store(source_register, cgen_->ToMemOperand(destination));
    }

  } else if (source->IsStackSlot()) {
    MemOperand source_operand = cgen_->ToMemOperand(source);
    if (destination->IsRegister()) {
      __ Load(cgen_->ToRegister(destination), source_operand);
    } else {
      DCHECK(destination->IsStackSlot());
      EmitStackSlotMove(index);
    }

  } else if (source->IsConstantOperand()) {
    LConstantOperand* constant_source = LConstantOperand::cast(source);
    if (destination->IsRegister()) {
      Register dst = cgen_->ToRegister(destination);
      if (cgen_->IsSmi(constant_source)) {
        __ Mov(dst, cgen_->ToSmi(constant_source));
      } else if (cgen_->IsInteger32Constant(constant_source)) {
        __ Mov(dst, cgen_->ToInteger32(constant_source));
      } else {
        __ LoadObject(dst, cgen_->ToHandle(constant_source));
      }
    } else if (destination->IsDoubleRegister()) {
      DoubleRegister result = cgen_->ToDoubleRegister(destination);
      __ Fmov(result, cgen_->ToDouble(constant_source));
    } else {
      DCHECK(destination->IsStackSlot());
      if (cgen_->IsSmi(constant_source)) {
        Smi* smi = cgen_->ToSmi(constant_source);
        __ StoreConstant(reinterpret_cast<intptr_t>(smi),
                         cgen_->ToMemOperand(destination));
      } else if (cgen_->IsInteger32Constant(constant_source)) {
        __ StoreConstant(cgen_->ToInteger32(constant_source),
                         cgen_->ToMemOperand(destination));
      } else {
        Handle<Object> handle = cgen_->ToHandle(constant_source);
        AllowDeferredHandleDereference smi_object_check;
        if (handle->IsSmi()) {
          Object* obj = *handle;
          DCHECK(!obj->IsHeapObject());
          __ StoreConstant(reinterpret_cast<intptr_t>(obj),
                           cgen_->ToMemOperand(destination));
        } else {
          AcquireSavedValueRegister();
          __ LoadObject(SavedValueRegister(), handle);
          __ Store(SavedValueRegister(), cgen_->ToMemOperand(destination));
          ReleaseSavedValueRegister();
        }
      }
    }

  } else if (source->IsDoubleRegister()) {
    DoubleRegister src = cgen_->ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      __ Fmov(cgen_->ToDoubleRegister(destination), src);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      __ Store(src, cgen_->ToMemOperand(destination));
    }

  } else if (source->IsDoubleStackSlot()) {
    MemOperand src = cgen_->ToMemOperand(source);
    if (destination->IsDoubleRegister()) {
      __ Load(cgen_->ToDoubleRegister(destination), src);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      EmitStackSlotMove(index);
    }

  } else {
    UNREACHABLE();
  }

  // The move has been emitted, we can eliminate it.
  moves_[index].Eliminate();
}