V8 Project
lithium-codegen-ia32.h
Go to the documentation of this file.
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #ifndef V8_IA32_LITHIUM_CODEGEN_IA32_H_
6 #define V8_IA32_LITHIUM_CODEGEN_IA32_H_
7 
#include "src/ia32/lithium-ia32.h"

#include "src/base/logging.h"
#include "src/deoptimizer.h"
#include "src/ia32/lithium-gap-resolver-ia32.h"
#include "src/lithium-codegen.h"
#include "src/safepoint-table.h"
#include "src/scopes.h"
#include "src/utils.h"
17 
18 namespace v8 {
19 namespace internal {
20 
21 // Forward declarations.
22 class LDeferredCode;
23 class LGapNode;
24 class SafepointGenerator;
25 
26 class LCodeGen: public LCodeGenBase {
27  public:
29  : LCodeGenBase(chunk, assembler, info),
30  deoptimizations_(4, info->zone()),
31  jump_table_(4, info->zone()),
32  deoptimization_literals_(8, info->zone()),
34  scope_(info->scope()),
35  translations_(info->zone()),
36  deferred_(8, info->zone()),
39  osr_pc_offset_(-1),
41  safepoints_(info->zone()),
42  resolver_(this),
43  expected_safepoint_kind_(Safepoint::kSimple) {
45  }
46 
47  int LookupDestination(int block_id) const {
48  return chunk()->LookupDestination(block_id);
49  }
50 
51  bool IsNextEmittedBlock(int block_id) const {
52  return LookupDestination(block_id) == GetNextEmittedBlock();
53  }
54 
55  bool NeedsEagerFrame() const {
56  return GetStackSlotCount() > 0 ||
57  info()->is_non_deferred_calling() ||
58  !info()->IsStub() ||
59  info()->requires_frame();
60  }
61  bool NeedsDeferredFrame() const {
62  return !NeedsEagerFrame() && info()->is_deferred_calling();
63  }
64 
65  // Support for converting LOperands to assembler types.
69 
70  bool IsInteger32(LConstantOperand* op) const;
71  bool IsSmi(LConstantOperand* op) const;
73  return Immediate(ToRepresentation(LConstantOperand::cast(op), r));
74  }
75  double ToDouble(LConstantOperand* op) const;
76 
77  Handle<Object> ToHandle(LConstantOperand* op) const;
78 
79  // The operand denoting the second word (the one with a higher address) of
80  // a double stack slot.
82 
83  // Try to generate code for the entire chunk, but it may fail if the
84  // chunk contains constructs we cannot handle. Returns true if the
85  // code generation attempt succeeded.
86  bool GenerateCode();
87 
88  // Finish the code by setting stack height, safepoint, and bailout
89  // information on it.
91 
92  // Deferred code support.
93  void DoDeferredNumberTagD(LNumberTagD* instr);
94 
97  LOperand* value,
98  LOperand* temp,
99  IntegerSignedness signedness);
100 
101  void DoDeferredTaggedToI(LTaggedToI* instr, Label* done);
102  void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
103  void DoDeferredStackCheck(LStackCheck* instr);
104  void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
105  void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
106  void DoDeferredAllocate(LAllocate* instr);
107  void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
108  Label* map_check);
109  void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
110  void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
111  Register object,
112  Register index);
113 
114  // Parallel move support.
115  void DoParallelMove(LParallelMove* move);
116  void DoGap(LGap* instr);
117 
118  // Emit frame translation commands for an environment.
119  void WriteTranslation(LEnvironment* environment, Translation* translation);
120 
122 
123  // Declare methods that deal with the individual node types.
124 #define DECLARE_DO(type) void Do##type(L##type* node);
126 #undef DECLARE_DO
127 
128  private:
129  StrictMode strict_mode() const { return info()->strict_mode(); }
130 
131  Scope* scope() const { return scope_; }
132 
133  XMMRegister double_scratch0() const { return xmm0; }
134 
135  void EmitClassOfTest(Label* if_true,
136  Label* if_false,
137  Handle<String> class_name,
138  Register input,
139  Register temporary,
140  Register temporary2);
141 
142  int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
143 
144  void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
145 
148 
149  // Code generation passes. Returns true if code generation should
150  // continue.
157 
158  // Generates the custom OSR entrypoint and sets the osr_pc_offset.
160 
164  };
165 
168  LInstruction* instr);
169 
172  LInstruction* instr,
173  SafepointMode safepoint_mode);
174 
176  int argc,
177  LInstruction* instr,
178  SaveFPRegsMode save_doubles = kDontSaveFPRegs);
179 
181  int argc,
182  LInstruction* instr) {
183  const Runtime::Function* function = Runtime::FunctionForId(id);
184  CallRuntime(function, argc, instr);
185  }
186 
188  int argc,
189  LInstruction* instr,
190  LOperand* context);
191 
193 
194  enum EDIState {
197  };
198 
199  // Generate a direct call to a known function. Expects the function
200  // to be in edi.
202  int formal_parameter_count,
203  int arity,
204  LInstruction* instr,
205  EDIState edi_state);
206 
208  SafepointMode safepoint_mode);
209 
211  Safepoint::DeoptMode mode);
212  void DeoptimizeIf(Condition cc, LInstruction* instr, const char* detail,
213  Deoptimizer::BailoutType bailout_type);
214  void DeoptimizeIf(Condition cc, LInstruction* instr, const char* detail);
215 
217  return FLAG_deopt_every_n_times != 0 && !info()->IsStub();
218  }
219 
220  void AddToTranslation(LEnvironment* environment,
221  Translation* translation,
222  LOperand* op,
223  bool is_tagged,
224  bool is_uint32,
225  int* object_index_pointer,
226  int* dematerialized_index_pointer);
229 
231 
232  Register ToRegister(int index) const;
233  XMMRegister ToDoubleRegister(int index) const;
234  int32_t ToRepresentation(LConstantOperand* op, const Representation& r) const;
235  int32_t ToInteger32(LConstantOperand* op) const;
236  ExternalReference ToExternalReference(LConstantOperand* op) const;
237 
239  LOperand* key,
240  Representation key_representation,
241  ElementsKind elements_kind,
242  uint32_t base_offset);
243 
245  LOperand* index,
246  String::Encoding encoding);
247 
248  void EmitIntegerMathAbs(LMathAbs* instr);
249 
250  // Support for recording safepoint and position information.
251  void RecordSafepoint(LPointerMap* pointers,
252  Safepoint::Kind kind,
253  int arguments,
254  Safepoint::DeoptMode mode);
255  void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
256  void RecordSafepoint(Safepoint::DeoptMode mode);
257  void RecordSafepointWithRegisters(LPointerMap* pointers,
258  int arguments,
259  Safepoint::DeoptMode mode);
260 
261  void RecordAndWritePosition(int position) OVERRIDE;
262 
263  static Condition TokenToCondition(Token::Value op, bool is_unsigned);
264  void EmitGoto(int block);
265 
266  // EmitBranch expects to be the last instruction of a block.
267  template<class InstrType>
268  void EmitBranch(InstrType instr, Condition cc);
269  template<class InstrType>
270  void EmitFalseBranch(InstrType instr, Condition cc);
271  void EmitNumberUntagD(LNumberUntagD* instr, Register input, Register temp,
273 
274  // Emits optimized code for typeof x == "y". Modifies input register.
275  // Returns the condition on which a final split to
276  // true and false label should be made, to optimize fallthrough.
277  Condition EmitTypeofIs(LTypeofIsAndBranch* instr, Register input);
278 
279  // Emits optimized code for %_IsObject(x). Preserves input register.
280  // Returns the condition on which a final split to
281  // true and false label should be made, to optimize fallthrough.
283  Register temp1,
284  Label* is_not_object,
285  Label* is_object);
286 
287  // Emits optimized code for %_IsString(x). Preserves input register.
288  // Returns the condition on which a final split to
289  // true and false label should be made, to optimize fallthrough.
291  Register temp1,
292  Label* is_not_string,
293  SmiCheck check_needed);
294 
295  // Emits optimized code for %_IsConstructCall().
296  // Caller should branch on equal condition.
298 
299  // Emits optimized code to deep-copy the contents of statically known
300  // object graphs (e.g. object literal boilerplate).
302  Register result,
303  Register source,
304  int* offset,
306 
307  void EnsureSpaceForLazyDeopt(int space_needed) OVERRIDE;
314 
315  template <class T>
317 
318  void EmitReturn(LReturn* instr, bool dynamic_frame_alignment);
319 
320  // Emits code for pushing either a tagged constant, a (non-double)
321  // register, or a stack slot operand.
323 
324  friend class LGapResolver;
325 
326 #ifdef _MSC_VER
327  // On windows, you may not access the stack more than one page below
328  // the most recently mapped page. To make the allocated area randomly
329  // accessible, we write an arbitrary value to each page in range
330  // esp + offset - page_size .. esp in turn.
331  void MakeSureStackPagesMapped(int offset);
332 #endif
333 
338  Scope* const scope_;
339  TranslationBuffer translations_;
343  int osr_pc_offset_;
344  bool frame_is_built_;
345 
346  // Builder that keeps track of safepoints in the code. The table
347  // itself is emitted at the end of the generated code.
348  SafepointTableBuilder safepoints_;
349 
350  // Compiler from a set of parallel moves to a sequential list of moves.
352 
353  Safepoint::Kind expected_safepoint_kind_;
354 
355  class PushSafepointRegistersScope FINAL BASE_EMBEDDED {
356  public:
358  : codegen_(codegen) {
359  DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
360  codegen_->masm_->PushSafepointRegisters();
361  codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
362  DCHECK(codegen_->info()->is_calling());
363  }
364 
366  DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
367  codegen_->masm_->PopSafepointRegisters();
368  codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
369  }
370 
371  private:
372  LCodeGen* codegen_;
373  };
374 
375  friend class LDeferredCode;
376  friend class LEnvironment;
377  friend class SafepointGenerator;
379 };
380 
381 
382 class LDeferredCode : public ZoneObject {
383  public:
385  : codegen_(codegen),
387  instruction_index_(codegen->current_instruction_) {
388  codegen->AddDeferredCode(this);
389  }
390 
391  virtual ~LDeferredCode() {}
392  virtual void Generate() = 0;
393  virtual LInstruction* instr() = 0;
394 
395  void SetExit(Label* exit) { external_exit_ = exit; }
396  Label* entry() { return &entry_; }
397  Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
398  Label* done() { return codegen_->NeedsDeferredFrame() ? &done_ : exit(); }
399  int instruction_index() const { return instruction_index_; }
400 
401  protected:
402  LCodeGen* codegen() const { return codegen_; }
403  MacroAssembler* masm() const { return codegen_->masm(); }
404 
405  private:
407  Label entry_;
408  Label exit_;
409  Label* external_exit_;
410  Label done_;
411  int instruction_index_;
412 };
413 
414 } } // namespace v8::internal
415 
416 #endif // V8_IA32_LITHIUM_CODEGEN_IA32_H_
Source to read snapshot and builtins files from.
Definition: lithium-arm.h:372
bool IsNextEmittedBlock(int block_id) const
void CallRuntime(const Runtime::Function *fun, int argc, LInstruction *instr, SaveFPRegsMode save_doubles=kDontSaveFPRegs)
Condition EmitTypeofIs(LTypeofIsAndBranch *instr, Register input)
Register ToRegister(int index) const
void DoStoreKeyedFixedArray(LStoreKeyed *instr)
void AddDeferredCode(LDeferredCode *code)
void RecordSafepointWithRegisters(LPointerMap *pointers, int arguments, Safepoint::DeoptMode mode)
void EmitDeepCopy(Handle< JSObject > object, Register result, Register source, int *offset, AllocationSiteMode mode)
bool IsSmi(LConstantOperand *op) const
void EmitBranch(InstrType instr, Condition cc)
TranslationBuffer translations_
Condition EmitIsString(Register input, Register temp1, Label *is_not_string, SmiCheck check_needed)
void EmitFalseBranch(InstrType instr, Condition cc)
Operand BuildSeqStringOperand(Register string, LOperand *index, String::Encoding encoding)
void DoDeferredStackCheck(LStackCheck *instr)
void DeoptimizeIf(Condition cc, LInstruction *instr, const char *detail)
SafepointTableBuilder safepoints_
void EmitVectorLoadICRegisters(T *instr)
ZoneList< Handle< Object > > deoptimization_literals_
void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal *instr, Label *map_check, Label *bool_load)
void DoGap(LGap *instr)
void RecordSafepoint(Safepoint::DeoptMode mode)
void PopulateDeoptimizationLiteralsWithInlinedFunctions()
void AddToTranslation(LEnvironment *environment, Translation *translation, LOperand *op, bool is_tagged, bool is_uint32, int *object_index_pointer, int *dematerialized_index_pointer)
ZoneList< LEnvironment * > deoptimizations_
DISALLOW_COPY_AND_ASSIGN(LCodeGen)
void EmitIntegerMathAbs(LMathAbs *instr)
int32_t ToRepresentation(LConstantOperand *op, const Representation &r) const
void CallRuntimeFromDeferred(Runtime::FunctionId id, int argc, LInstruction *instr, LOperand *context)
void EmitPushTaggedOperand(LOperand *operand)
int32_t ToInteger32(LConstantOperand *op) const
LPlatformChunk * chunk() const
void FinishCode(Handle< Code > code)
Handle< Object > ToHandle(LConstantOperand *op) const
ExternalReference ToExternalReference(LConstantOperand *op) const
int LookupDestination(int block_id) const
void DoDeferredAllocate(LAllocate *instr)
void RecordSafepoint(LPointerMap *pointers, Safepoint::Kind kind, int arguments, Safepoint::DeoptMode mode)
void EmitNumberUntagD(LNumberUntagD *instr, Register input, Register temp, XMMRegister result, NumberUntagDMode mode)
void DoDeferredLoadMutableDouble(LLoadFieldByIndex *instr, Register object, Register index)
void DeoptimizeIf(Condition cc, LInstruction *instr, const char *detail, Deoptimizer::BailoutType bailout_type)
XMMRegister ToDoubleRegister(LOperand *op) const
void DoDeferredStringCharCodeAt(LStringCharCodeAt *instr)
Safepoint::Kind expected_safepoint_kind_
ZoneList< LDeferredCode * > deferred_
void EmitGoto(int block)
Operand HighOperand(LOperand *op)
void RecordSafepoint(LPointerMap *pointers, Safepoint::DeoptMode mode)
StrictMode strict_mode() const
void CallKnownFunction(Handle< JSFunction > function, int formal_parameter_count, int arity, LInstruction *instr, EDIState edi_state)
void GenerateBodyInstructionPost(LInstruction *instr) OVERRIDE
void RegisterEnvironmentForDeoptimization(LEnvironment *environment, Safepoint::DeoptMode mode)
void LoadContextFromDeferred(LOperand *context)
void CallCodeGeneric(Handle< Code > code, RelocInfo::Mode mode, LInstruction *instr, SafepointMode safepoint_mode)
Operand BuildFastArrayOperand(LOperand *elements_pointer, LOperand *key, Representation key_representation, ElementsKind elements_kind, uint32_t base_offset)
void DoDeferredInstanceMigration(LCheckMaps *instr, Register object)
int DefineDeoptimizationLiteral(Handle< Object > literal)
void EnsureRelocSpaceForDeoptimization()
void EmitIsConstructCall(Register temp)
void CallRuntime(Runtime::FunctionId id, int argc, LInstruction *instr)
void WriteTranslation(LEnvironment *environment, Translation *translation)
void DoDeferredMathAbsTaggedHeapNumber(LMathAbs *instr)
void DoLoadKeyedFixedDoubleArray(LLoadKeyed *instr)
void DoDeferredNumberTagIU(LInstruction *instr, LOperand *value, LOperand *temp, IntegerSignedness signedness)
void DoDeferredTaggedToI(LTaggedToI *instr, Label *done)
static Condition TokenToCondition(Token::Value op, bool is_unsigned)
void EmitClassOfTest(Label *if_true, Label *if_false, Handle< String > class_name, Register input, Register temporary, Register temporary2)
void DoLoadKeyedExternalArray(LLoadKeyed *instr)
void EmitReturn(LReturn *instr, bool dynamic_frame_alignment)
Immediate ToImmediate(LOperand *op, const Representation &r) const
double ToDouble(LConstantOperand *op) const
Register ToRegister(LOperand *op) const
void DoStoreKeyedExternalArray(LStoreKeyed *instr)
void RecordAndWritePosition(int position) OVERRIDE
bool IsInteger32(LConstantOperand *op) const
void PopulateDeoptimizationData(Handle< Code > code)
void DoParallelMove(LParallelMove *move)
void CallRuntime(const Runtime::Function *function, int num_arguments, LInstruction *instr, SaveFPRegsMode save_doubles=kDontSaveFPRegs)
Operand ToOperand(LOperand *op) const
void CallCode(Handle< Code > code, RelocInfo::Mode mode, LInstruction *instr)
void DoDeferredStringCharFromCode(LStringCharFromCode *instr)
ZoneList< Deoptimizer::JumpTableEntry > jump_table_
Condition EmitIsObject(Register input, Register temp1, Label *is_not_object, Label *is_object)
void EnsureSpaceForLazyDeopt(int space_needed) OVERRIDE
XMMRegister double_scratch0() const
void GenerateBodyInstructionPre(LInstruction *instr) OVERRIDE
void RecordSafepointWithLazyDeopt(LInstruction *instr, SafepointMode safepoint_mode)
void DoLoadKeyedFixedArray(LLoadKeyed *instr)
LCodeGen(LChunk *chunk, MacroAssembler *assembler, CompilationInfo *info)
void DoDeferredNumberTagD(LNumberTagD *instr)
void DoStoreKeyedFixedDoubleArray(LStoreKeyed *instr)
XMMRegister ToDoubleRegister(int index) const
MacroAssembler * masm() const
virtual void Generate()=0
virtual LInstruction * instr()=0
static const Function * FunctionForId(FunctionId id)
Definition: runtime.cc:9312
#define OVERRIDE
enable harmony numeric enable harmony object literal extensions Optimize object Array DOM strings and string trace pretenuring decisions of HAllocate instructions Enables optimizations which favor memory size over execution speed maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining trace the tracking of allocation sites deoptimize every n garbage collections perform array bounds checks elimination analyze liveness of environment slots and zap dead values flushes the cache of optimized code for closures on every GC allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes enable context specialization in TurboFan execution budget before interrupt is triggered max percentage of megamorphic generic ICs to allow optimization enable use of SAHF instruction if enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable use of MLS instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long mode(MIPS only)") DEFINE_BOOL(enable_always_align_csp
enable harmony numeric enable harmony object literal extensions Optimize object Array DOM strings and string trace pretenuring decisions of HAllocate instructions Enables optimizations which favor memory size over execution speed maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining trace the tracking of allocation sites deoptimize every n garbage collections perform array bounds checks elimination analyze liveness of environment slots and zap dead values flushes the cache of optimized code for closures on every GC allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes enable context specialization in TurboFan execution budget before interrupt is triggered max percentage of megamorphic generic ICs to allow optimization enable use of SAHF instruction if enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable use of MLS instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long enable alignment of csp to bytes on platforms which prefer the register to always be NULL
#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V)
Definition: lithium-arm.h:20
#define DECLARE_DO(type)
#define DCHECK(condition)
Definition: logging.h:205
int int32_t
Definition: unicode.cc:24
const XMMRegister xmm0
Debugger support for the V8 JavaScript engine.
Definition: accessors.cc:20
#define T(name, string, precedence)
Definition: token.cc:25