V8 Project
lithium-codegen-mips64.h
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_MIPS_LITHIUM_CODEGEN_MIPS_H_
#define V8_MIPS_LITHIUM_CODEGEN_MIPS_H_

#include "src/deoptimizer.h"
#include "src/lithium-codegen.h"
#include "src/mips64/lithium-gap-resolver-mips64.h"
#include "src/mips64/lithium-mips64.h"
#include "src/safepoint-table.h"
#include "src/scopes.h"
#include "src/utils.h"

namespace v8 {
namespace internal {

// Forward declarations.
class LDeferredCode;
class SafepointGenerator;

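// LCodeGen is the platform-specific (MIPS64) half of the Crankshaft code
// generator: it walks a Lithium chunk and emits native code for each
// instruction.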
class LCodeGen: public LCodeGenBase {
 public:
  LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
      : LCodeGenBase(chunk, assembler, info),
        deoptimizations_(4, info->zone()),
        jump_table_(4, info->zone()),
        deoptimization_literals_(8, info->zone()),
        inlined_function_count_(0),
        scope_(info->scope()),
        translations_(info->zone()),
        deferred_(8, info->zone()),
        osr_pc_offset_(-1),
        frame_is_built_(false),
        safepoints_(info->zone()),
        resolver_(this),
        expected_safepoint_kind_(Safepoint::kSimple) {
    PopulateDeoptimizationLiteralsWithInlinedFunctions();
  }

  int LookupDestination(int block_id) const {
    return chunk()->LookupDestination(block_id);
  }

  bool IsNextEmittedBlock(int block_id) const {
    return LookupDestination(block_id) == GetNextEmittedBlock();
  }

  bool NeedsEagerFrame() const {
    return GetStackSlotCount() > 0 ||
        info()->is_non_deferred_calling() ||
        !info()->IsStub() ||
        info()->requires_frame();
  }
  bool NeedsDeferredFrame() const {
    return !NeedsEagerFrame() && info()->is_deferred_calling();
  }

  RAStatus GetRAState() const {
    return frame_is_built_ ? kRAHasBeenSaved : kRAHasNotBeenSaved;
  }

  // Support for converting LOperands to assembler types.
  // LOperand must be a register.
  Register ToRegister(LOperand* op) const;

  // LOperand is loaded into scratch, unless already a register.
  Register EmitLoadRegister(LOperand* op, Register scratch);

  // LOperand must be a double register.
  DoubleRegister ToDoubleRegister(LOperand* op) const;

  // LOperand is loaded into dbl_scratch, unless already a double register.
  DoubleRegister EmitLoadDoubleRegister(LOperand* op,
                                        FloatRegister flt_scratch,
                                        DoubleRegister dbl_scratch);
  int32_t ToRepresentation_donotuse(LConstantOperand* op,
                                    const Representation& r) const;
  int32_t ToInteger32(LConstantOperand* op) const;
  Smi* ToSmi(LConstantOperand* op) const;
  double ToDouble(LConstantOperand* op) const;
  Operand ToOperand(LOperand* op);
  MemOperand ToMemOperand(LOperand* op) const;
  // Returns a MemOperand pointing to the high word of a DoubleStackSlot.
  MemOperand ToHighMemOperand(LOperand* op) const;

  bool IsInteger32(LConstantOperand* op) const;
  bool IsSmi(LConstantOperand* op) const;
  Handle<Object> ToHandle(LConstantOperand* op) const;

  // Try to generate code for the entire chunk, but it may fail if the
  // chunk contains constructs we cannot handle. Returns true if the
  // code generation attempt succeeded.
  bool GenerateCode();

  // Finish the code by setting stack height, safepoint, and bailout
  // information on it.
  void FinishCode(Handle<Code> code);

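  // Deferred code support.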
  void DoDeferredNumberTagD(LNumberTagD* instr);

  enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
  void DoDeferredNumberTagIU(LInstruction* instr,
                             LOperand* value,
                             LOperand* temp1,
                             LOperand* temp2,
                             IntegerSignedness signedness);

  void DoDeferredTaggedToI(LTaggedToI* instr);
  void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
  void DoDeferredStackCheck(LStackCheck* instr);
  void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
  void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
  void DoDeferredAllocate(LAllocate* instr);
  void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
                                       Label* map_check);

  void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
  void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
                                   Register result,
                                   Register object,
                                   Register index);

  // Parallel move support.
  void DoParallelMove(LParallelMove* move);
  void DoGap(LGap* instr);

  MemOperand PrepareKeyedOperand(Register key,
                                 Register base,
                                 bool key_is_constant,
                                 int constant_key,
                                 int element_size,
                                 int shift_size,
                                 int base_offset);

  // Emit frame translation commands for an environment.
  void WriteTranslation(LEnvironment* environment, Translation* translation);

  // Declare methods that deal with the individual node types.
#define DECLARE_DO(type) void Do##type(L##type* node);
  LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
#undef DECLARE_DO
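  // (LITHIUM_CONCRETE_INSTRUCTION_LIST expands DECLARE_DO once per concrete
  // Lithium instruction, declaring e.g. void DoAddI(LAddI* node);)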

 private:
  StrictMode strict_mode() const { return info()->strict_mode(); }

  Scope* scope() const { return scope_; }

  Register scratch0() { return kLithiumScratchReg; }
  Register scratch1() { return kLithiumScratchReg2; }
  DoubleRegister double_scratch0() { return kLithiumScratchDouble; }

  LInstruction* GetNextInstruction();

  void EmitClassOfTest(Label* if_true,
                       Label* if_false,
                       Handle<String> class_name,
                       Register input,
                       Register temporary,
                       Register temporary2);

  int GetStackSlotCount() const { return chunk()->spill_slot_count(); }

  void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }

  void SaveCallerDoubles();
  void RestoreCallerDoubles();

  // Code generation passes. Returns true if code generation should
  // continue.
  void GenerateBodyInstructionPre(LInstruction* instr) OVERRIDE;
  bool GeneratePrologue();
  bool GenerateDeferredCode();
  bool GenerateJumpTable();
  bool GenerateSafepointTable();

  // Generates the custom OSR entrypoint and sets the osr_pc_offset.
  void GenerateOsrPrologue();

  enum SafepointMode {
    RECORD_SIMPLE_SAFEPOINT,
    RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
  };

  void CallCode(Handle<Code> code,
                RelocInfo::Mode mode,
                LInstruction* instr);

  void CallCodeGeneric(Handle<Code> code,
                       RelocInfo::Mode mode,
                       LInstruction* instr,
                       SafepointMode safepoint_mode);

  void CallRuntime(const Runtime::Function* function,
                   int num_arguments,
                   LInstruction* instr,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs);

  void CallRuntime(Runtime::FunctionId id,
                   int num_arguments,
                   LInstruction* instr) {
    const Runtime::Function* function = Runtime::FunctionForId(id);
    CallRuntime(function, num_arguments, instr);
  }

  void LoadContextFromDeferred(LOperand* context);
  void CallRuntimeFromDeferred(Runtime::FunctionId id,
                               int argc,
                               LInstruction* instr,
                               LOperand* context);

  enum A1State {
    A1_UNINITIALIZED,
    A1_CONTAINS_TARGET
  };

  // Generate a direct call to a known function. Expects the function
  // to be in a1.
  void CallKnownFunction(Handle<JSFunction> function,
                         int formal_parameter_count,
                         int arity,
                         LInstruction* instr,
                         A1State a1_state);

  void RecordSafepointWithLazyDeopt(LInstruction* instr,
                                    SafepointMode safepoint_mode);

  void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                            Safepoint::DeoptMode mode);
  void DeoptimizeIf(Condition condition, LInstruction* instr,
                    Deoptimizer::BailoutType bailout_type, const char* detail,
                    Register src1 = zero_reg,
                    const Operand& src2 = Operand(zero_reg));
  void DeoptimizeIf(Condition condition, LInstruction* instr,
                    const char* detail, Register src1 = zero_reg,
                    const Operand& src2 = Operand(zero_reg));

  void AddToTranslation(LEnvironment* environment,
                        Translation* translation,
                        LOperand* op,
                        bool is_tagged,
                        bool is_uint32,
                        int* object_index_pointer,
                        int* dematerialized_index_pointer);
  void PopulateDeoptimizationData(Handle<Code> code);
  int DefineDeoptimizationLiteral(Handle<Object> literal);

  void PopulateDeoptimizationLiteralsWithInlinedFunctions();

  Register ToRegister(int index) const;
  DoubleRegister ToDoubleRegister(int index) const;

  MemOperand BuildSeqStringOperand(Register string,
                                   LOperand* index,
                                   String::Encoding encoding);

  void EmitIntegerMathAbs(LMathAbs* instr);

  // Support for recording safepoint and position information.
  void RecordSafepoint(LPointerMap* pointers,
                       Safepoint::Kind kind,
                       int arguments,
                       Safepoint::DeoptMode mode);
  void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
  void RecordSafepoint(Safepoint::DeoptMode mode);
  void RecordSafepointWithRegisters(LPointerMap* pointers,
                                    int arguments,
                                    Safepoint::DeoptMode mode);

  void RecordAndWritePosition(int position) OVERRIDE;

  static Condition TokenToCondition(Token::Value op, bool is_unsigned);
  void EmitGoto(int block);

  // EmitBranch expects to be the last instruction of a block.
  template<class InstrType>
  void EmitBranch(InstrType instr,
                  Condition condition,
                  Register src1,
                  const Operand& src2);
  template<class InstrType>
  void EmitBranchF(InstrType instr,
                   Condition condition,
                   FPURegister src1,
                   FPURegister src2);
  template<class InstrType>
  void EmitFalseBranch(InstrType instr,
                       Condition condition,
                       Register src1,
                       const Operand& src2);
  template<class InstrType>
  void EmitFalseBranchF(InstrType instr,
                        Condition condition,
                        FPURegister src1,
                        FPURegister src2);
  void EmitCmpI(LOperand* left, LOperand* right);
  void EmitNumberUntagD(LNumberUntagD* instr, Register input,
                        DoubleRegister result, NumberUntagDMode mode);

  // Emits optimized code for typeof x == "y". Modifies input register.
  // Returns the condition on which a final split to
  // true and false label should be made, to optimize fallthrough.
  // Returns two registers in cmp1 and cmp2 that can be used in the
  // Branch instruction after EmitTypeofIs.
  Condition EmitTypeofIs(Label* true_label,
                         Label* false_label,
                         Register input,
                         Handle<String> type_name,
                         Register* cmp1,
                         Operand* cmp2);

  // Emits optimized code for %_IsObject(x). Preserves input register.
  // Returns the condition on which a final split to
  // true and false label should be made, to optimize fallthrough.
  Condition EmitIsObject(Register input,
                         Register temp1,
                         Register temp2,
                         Label* is_not_object,
                         Label* is_object);

  // Emits optimized code for %_IsString(x). Preserves input register.
  // Returns the condition on which a final split to
  // true and false label should be made, to optimize fallthrough.
  Condition EmitIsString(Register input,
                         Register temp1,
                         Label* is_not_string,
                         SmiCheck check_needed);

  // Emits optimized code for %_IsConstructCall().
  // Caller should branch on equal condition.
  void EmitIsConstructCall(Register temp1, Register temp2);

  // Emits optimized code to deep-copy the contents of statically known
  // object graphs (e.g. object literal boilerplate).
  void EmitDeepCopy(Handle<JSObject> object,
                    Register result,
                    Register source,
                    int* offset,
                    AllocationSiteMode mode);

  // Emit optimized code for integer division.
  // Inputs are signed.
  // All registers are clobbered.
  // If 'remainder' is no_reg, it is not computed.
  void EmitSignedIntegerDivisionByConstant(Register result,
                                           Register dividend,
                                           int32_t divisor,
                                           Register remainder,
                                           Register scratch,
                                           LEnvironment* environment);

  void EnsureSpaceForLazyDeopt(int space_needed) OVERRIDE;
  void DoLoadKeyedExternalArray(LLoadKeyed* instr);
  void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
  void DoLoadKeyedFixedArray(LLoadKeyed* instr);
  void DoStoreKeyedExternalArray(LStoreKeyed* instr);
  void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
  void DoStoreKeyedFixedArray(LStoreKeyed* instr);

  template <class T>
  void EmitVectorLoadICRegisters(T* instr);

  ZoneList<LEnvironment*> deoptimizations_;
  ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
  ZoneList<Handle<Object> > deoptimization_literals_;
  int inlined_function_count_;
  Scope* const scope_;
  TranslationBuffer translations_;
  ZoneList<LDeferredCode*> deferred_;
  int osr_pc_offset_;
  bool frame_is_built_;

  // Builder that keeps track of safepoints in the code. The table
  // itself is emitted at the end of the generated code.
  SafepointTableBuilder safepoints_;

  // Compiler from a set of parallel moves to a sequential list of moves.
  LGapResolver resolver_;

  Safepoint::Kind expected_safepoint_kind_;

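  // Saves the full register state via StoreRegistersStateStub on entry and
  // restores it via RestoreRegistersStateStub on exit, switching the expected
  // safepoint kind to Safepoint::kWithRegisters for the duration.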
  class PushSafepointRegistersScope FINAL BASE_EMBEDDED {
   public:
    explicit PushSafepointRegistersScope(LCodeGen* codegen)
        : codegen_(codegen) {
      DCHECK(codegen_->info()->is_calling());
      DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
      codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;

      StoreRegistersStateStub stub(codegen_->isolate());
      codegen_->masm_->push(ra);
      codegen_->masm_->CallStub(&stub);
    }

    ~PushSafepointRegistersScope() {
      DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
      RestoreRegistersStateStub stub(codegen_->isolate());
      codegen_->masm_->push(ra);
      codegen_->masm_->CallStub(&stub);
      codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
    }

   private:
    LCodeGen* codegen_;
  };

  friend class LDeferredCode;
  friend class LEnvironment;
  friend class SafepointGenerator;
  DISALLOW_COPY_AND_ASSIGN(LCodeGen);
};


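// A deferred code block. Registered with the code generator via
// AddDeferredCode() and emitted out of line by GenerateDeferredCode(), which
// binds entry(), calls Generate() and then jumps back to exit().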
class LDeferredCode : public ZoneObject {
 public:
  explicit LDeferredCode(LCodeGen* codegen)
      : codegen_(codegen),
        external_exit_(NULL),
        instruction_index_(codegen->current_instruction_) {
    codegen->AddDeferredCode(this);
  }

  virtual ~LDeferredCode() {}
  virtual void Generate() = 0;
  virtual LInstruction* instr() = 0;

  void SetExit(Label* exit) { external_exit_ = exit; }
  Label* entry() { return &entry_; }
  Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
  int instruction_index() const { return instruction_index_; }

 protected:
  LCodeGen* codegen() const { return codegen_; }
  MacroAssembler* masm() const { return codegen_->masm(); }

 private:
  LCodeGen* codegen_;
  Label entry_;
  Label exit_;
  Label* external_exit_;
  int instruction_index_;
};

} }  // namespace v8::internal

#endif  // V8_MIPS_LITHIUM_CODEGEN_MIPS_H_