V8 Project
lithium-codegen-arm64.h
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_ARM64_LITHIUM_CODEGEN_ARM64_H_
#define V8_ARM64_LITHIUM_CODEGEN_ARM64_H_

#include "src/arm64/lithium-arm64.h"

#include "src/arm64/lithium-gap-resolver-arm64.h"
#include "src/deoptimizer.h"
#include "src/lithium-codegen.h"
#include "src/safepoint-table.h"
#include "src/scopes.h"
#include "src/utils.h"

namespace v8 {
namespace internal {

// Forward declarations.
class LDeferredCode;
class SafepointGenerator;
class BranchGenerator;

class LCodeGen: public LCodeGenBase {
 public:
  LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
      : LCodeGenBase(chunk, assembler, info),
        deoptimizations_(4, info->zone()),
        jump_table_(4, info->zone()),
        deoptimization_literals_(8, info->zone()),
        inlined_function_count_(0),
        scope_(info->scope()),
        translations_(info->zone()),
        deferred_(8, info->zone()),
        osr_pc_offset_(-1),
        frame_is_built_(false),
        safepoints_(info->zone()),
        resolver_(this),
        expected_safepoint_kind_(Safepoint::kSimple),
        after_push_argument_(false),
        inlined_arguments_(false) {
    PopulateDeoptimizationLiteralsWithInlinedFunctions();
  }

  ~LCodeGen() {
    DCHECK(!after_push_argument_ || inlined_arguments_);
  }

  // Simple accessors.
  Scope* scope() const { return scope_; }

  int LookupDestination(int block_id) const {
    return chunk()->LookupDestination(block_id);
  }

  bool IsNextEmittedBlock(int block_id) const {
    return LookupDestination(block_id) == GetNextEmittedBlock();
  }

  bool NeedsEagerFrame() const {
    return GetStackSlotCount() > 0 ||
        info()->is_non_deferred_calling() ||
        !info()->IsStub() ||
        info()->requires_frame();
  }
  bool NeedsDeferredFrame() const {
    return !NeedsEagerFrame() && info()->is_deferred_calling();
  }

  LinkRegisterStatus GetLinkRegisterState() const {
    return frame_is_built_ ? kLRHasBeenSaved : kLRHasNotBeenSaved;
  }

  // Try to generate code for the entire chunk, but it may fail if the
  // chunk contains constructs we cannot handle. Returns true if the
  // code generation attempt succeeded.
  bool GenerateCode();

  // Finish the code by setting stack height, safepoint, and bailout
  // information on it.
  void FinishCode(Handle<Code> code);

  // Support for converting LOperands to assembler types.
  Register ToRegister(LOperand* op) const;
  Register ToRegister32(LOperand* op) const;
  Operand ToOperand(LOperand* op);
  Operand ToOperand32(LOperand* op);
  enum StackMode { kMustUseFramePointer, kCanUseStackPointer };
  MemOperand ToMemOperand(LOperand* op,
                          StackMode stack_mode = kCanUseStackPointer) const;
  Handle<Object> ToHandle(LConstantOperand* op) const;

  template <class LI>
  Operand ToShiftedRightOperand32(LOperand* right, LI* shift_info);

  int JSShiftAmountFromLConstant(LOperand* constant) {
    return ToInteger32(LConstantOperand::cast(constant)) & 0x1f;
  }
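  // Note: the "& 0x1f" above mirrors ECMAScript shift semantics, where only
  // the low five bits of the shift count are used; e.g. a constant shift
  // amount of 33 is reduced to 1.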

  // TODO(jbramley): Examine these helpers and check that they make sense.
  // IsInteger32Constant returns true for smi constants, for example.
  bool IsInteger32Constant(LConstantOperand* op) const;
  bool IsSmi(LConstantOperand* op) const;

  int32_t ToInteger32(LConstantOperand* op) const;
  Smi* ToSmi(LConstantOperand* op) const;
  double ToDouble(LConstantOperand* op) const;
  DoubleRegister ToDoubleRegister(LOperand* op) const;

  // Declare methods that deal with the individual node types.
#define DECLARE_DO(type) void Do##type(L##type* node);
  LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
#undef DECLARE_DO

 private:
  // Return a double scratch register which can be used locally
  // when generating code for a lithium instruction.
  DoubleRegister double_scratch() { return crankshaft_fp_scratch; }

  // Deferred code support.
  void DoDeferredNumberTagD(LNumberTagD* instr);
  void DoDeferredStackCheck(LStackCheck* instr);
  void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
  void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
  void DoDeferredMathAbsTagged(LMathAbsTagged* instr,
                               Label* exit,
                               Label* allocation_entry);

  void DoDeferredNumberTagU(LInstruction* instr,
                            LOperand* value,
                            LOperand* temp1,
                            LOperand* temp2);
  void DoDeferredTaggedToI(LTaggedToI* instr,
                           LOperand* value,
                           LOperand* temp1,
                           LOperand* temp2);
  void DoDeferredAllocate(LAllocate* instr);
  void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr);
  void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
  void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
                                   Register result,
                                   Register object,
                                   Register index);

  static Condition TokenToCondition(Token::Value op, bool is_unsigned);
  void EmitGoto(int block);
  void DoGap(LGap* instr);

  // Generic version of EmitBranch. It contains some code to avoid emitting a
  // branch on the next emitted basic block where we could just fall through.
  // You shouldn't use this directly; consider one of the helpers instead,
  // such as LCodeGen::EmitBranch or LCodeGen::EmitCompareAndBranch.
  template<class InstrType>
  void EmitBranchGeneric(InstrType instr,
                         const BranchGenerator& branch);
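  //
  // A sketch of the dispatch this performs (the actual implementation lives
  // in lithium-codegen-arm64.cc; block and label lookups are simplified):
  //
  //   if (right_block == left_block) {
  //     EmitGoto(left_block);
  //   } else if (left_block == next_block) {    // Fall through to the left
  //     branch.EmitInverted(right_label);       // block; branch only on the
  //   } else {                                  // inverted condition.
  //     branch.Emit(left_label);
  //     if (right_block != next_block) B(right_label);
  //   }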

  template<class InstrType>
  void EmitBranch(InstrType instr, Condition condition);

  template<class InstrType>
  void EmitCompareAndBranch(InstrType instr,
                            Condition condition,
                            const Register& lhs,
                            const Operand& rhs);

  template<class InstrType>
  void EmitTestAndBranch(InstrType instr,
                         Condition condition,
                         const Register& value,
                         uint64_t mask);

  template<class InstrType>
  void EmitBranchIfNonZeroNumber(InstrType instr,
                                 const FPRegister& value,
                                 const FPRegister& scratch);

  template<class InstrType>
  void EmitBranchIfHeapNumber(InstrType instr,
                              const Register& value);

  template<class InstrType>
  void EmitBranchIfRoot(InstrType instr,
                        const Register& value,
                        Heap::RootListIndex index);

  // Emits optimized code to deep-copy the contents of statically known object
  // graphs (e.g. object literal boilerplate). Expects a pointer to the
  // allocated destination object in the result register, and a pointer to the
  // source object in the source register.
  void EmitDeepCopy(Handle<JSObject> object,
                    Register result,
                    Register source,
                    Register scratch,
                    int* offset,
                    AllocationSiteMode mode);

  template <class T>
  void EmitVectorLoadICRegisters(T* instr);

  // Emits optimized code for %_IsString(x). Preserves the input register.
  // Returns the condition on which a final split to true and false labels
  // should be made, to optimize fallthrough.
  Condition EmitIsString(Register input, Register temp1, Label* is_not_string,
                         SmiCheck check_needed);
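  //
  // A typical call site splits on the returned condition, along these lines
  // (a sketch; see DoIsStringAndBranch in the .cc file):
  //   Condition true_cond =
  //       EmitIsString(value, scratch, instr->FalseLabel(chunk_), check_needed);
  //   EmitBranch(instr, true_cond);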

  int DefineDeoptimizationLiteral(Handle<Object> literal);
  void PopulateDeoptimizationData(Handle<Code> code);
  void PopulateDeoptimizationLiteralsWithInlinedFunctions();

  MemOperand BuildSeqStringOperand(Register string,
                                   Register temp,
                                   LOperand* index,
                                   String::Encoding encoding);
  void DeoptimizeBranch(LInstruction* instr, const char* detail,
                        BranchType branch_type, Register reg = NoReg,
                        int bit = -1,
                        Deoptimizer::BailoutType* override_bailout_type = NULL);
  void Deoptimize(LInstruction* instr, const char* detail,
                  Deoptimizer::BailoutType* override_bailout_type = NULL);
  void DeoptimizeIf(Condition cond, LInstruction* instr, const char* detail);
  void DeoptimizeIfZero(Register rt, LInstruction* instr, const char* detail);
  void DeoptimizeIfNotZero(Register rt, LInstruction* instr,
                           const char* detail);
  void DeoptimizeIfNegative(Register rt, LInstruction* instr,
                            const char* detail);
  void DeoptimizeIfSmi(Register rt, LInstruction* instr, const char* detail);
  void DeoptimizeIfNotSmi(Register rt, LInstruction* instr, const char* detail);
  void DeoptimizeIfRoot(Register rt, Heap::RootListIndex index,
                        LInstruction* instr, const char* detail);
  void DeoptimizeIfNotRoot(Register rt, Heap::RootListIndex index,
                           LInstruction* instr, const char* detail);
  void DeoptimizeIfNotHeapNumber(Register object, LInstruction* instr);
  void DeoptimizeIfMinusZero(DoubleRegister input, LInstruction* instr,
                             const char* detail);
  void DeoptimizeIfBitSet(Register rt, int bit, LInstruction* instr,
                          const char* detail);
  void DeoptimizeIfBitClear(Register rt, int bit, LInstruction* instr,
                            const char* detail);

  MemOperand PrepareKeyedExternalArrayOperand(Register key,
                                              Register base,
                                              Register scratch,
                                              bool key_is_smi,
                                              bool key_is_constant,
                                              int constant_key,
                                              ElementsKind elements_kind,
                                              int base_offset);
  MemOperand PrepareKeyedArrayOperand(Register base,
                                      Register elements,
                                      Register key,
                                      bool key_is_tagged,
                                      ElementsKind elements_kind,
                                      Representation representation,
                                      int base_offset);

  void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                            Safepoint::DeoptMode mode);

  int GetStackSlotCount() const { return chunk()->spill_slot_count(); }

  void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }

  // Emit frame translation commands for an environment.
  void WriteTranslation(LEnvironment* environment, Translation* translation);

  void AddToTranslation(LEnvironment* environment,
                        Translation* translation,
                        LOperand* op,
                        bool is_tagged,
                        bool is_uint32,
                        int* object_index_pointer,
                        int* dematerialized_index_pointer);

  void SaveCallerDoubles();
  void RestoreCallerDoubles();

  // Code generation steps. Returns true if code generation should continue.
  void GenerateBodyInstructionPre(LInstruction* instr) OVERRIDE;
  bool GeneratePrologue();
  bool GenerateDeferredCode();
  bool GenerateJumpTable();
  bool GenerateSafepointTable();

  // Generates the custom OSR entrypoint and sets the osr_pc_offset.
  void GenerateOsrPrologue();

  enum SafepointMode {
    RECORD_SIMPLE_SAFEPOINT,
    RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
  };

  void CallCode(Handle<Code> code,
                RelocInfo::Mode mode,
                LInstruction* instr);

  void CallCodeGeneric(Handle<Code> code,
                       RelocInfo::Mode mode,
                       LInstruction* instr,
                       SafepointMode safepoint_mode);

  void CallRuntime(const Runtime::Function* function,
                   int num_arguments,
                   LInstruction* instr,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs);

  void CallRuntime(Runtime::FunctionId id,
                   int num_arguments,
                   LInstruction* instr) {
    const Runtime::Function* function = Runtime::FunctionForId(id);
    CallRuntime(function, num_arguments, instr);
  }

  void LoadContextFromDeferred(LOperand* context);
  void CallRuntimeFromDeferred(Runtime::FunctionId id,
                               int argc,
                               LInstruction* instr,
                               LOperand* context);

  // Generate a direct call to a known function.
  // If the function is already loaded into x1 by the caller, function_reg may
  // be set to x1. Otherwise, it must be NoReg, and CallKnownFunction will
  // load it automatically.
  void CallKnownFunction(Handle<JSFunction> function,
                         int formal_parameter_count,
                         int arity,
                         LInstruction* instr,
                         Register function_reg = NoReg);
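  //
  // Call-site sketch (argument values are hypothetical):
  //   CallKnownFunction(fn, formal_count, arity, instr);      // loads x1 itself
  //   CallKnownFunction(fn, formal_count, arity, instr, x1);  // x1 preloaded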

  // Support for recording safepoint and position information.
  void RecordAndWritePosition(int position) OVERRIDE;
  void RecordSafepoint(LPointerMap* pointers,
                       Safepoint::Kind kind,
                       int arguments,
                       Safepoint::DeoptMode mode);
  void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
  void RecordSafepoint(Safepoint::DeoptMode mode);
  void RecordSafepointWithRegisters(LPointerMap* pointers,
                                    int arguments,
                                    Safepoint::DeoptMode mode);
  void RecordSafepointWithLazyDeopt(LInstruction* instr,
                                    SafepointMode safepoint_mode);

  void EnsureSpaceForLazyDeopt(int space_needed) OVERRIDE;

  ZoneList<LEnvironment*> deoptimizations_;
  ZoneList<Deoptimizer::JumpTableEntry*> jump_table_;
  ZoneList<Handle<Object> > deoptimization_literals_;
  int inlined_function_count_;
  Scope* const scope_;
  TranslationBuffer translations_;
  ZoneList<LDeferredCode*> deferred_;
  int osr_pc_offset_;
  bool frame_is_built_;

  // Builder that keeps track of safepoints in the code. The table itself is
  // emitted at the end of the generated code.
  SafepointTableBuilder safepoints_;

  // Compiles a set of parallel moves into a sequential list of moves.
  LGapResolver resolver_;

  Safepoint::Kind expected_safepoint_kind_;

  // This flag is true when we are after a push (but before a call).
  // In this situation, jssp no longer references the end of the stack slots,
  // so we can only reference a stack slot via fp.
  bool after_push_argument_;
  // If we have inlined arguments, we are no longer able to use jssp because
  // jssp is modified and we never know if we are in a block after or before
  // the pop of the arguments (which restores jssp).
  bool inlined_arguments_;

  class PushSafepointRegistersScope BASE_EMBEDDED {
   public:
    explicit PushSafepointRegistersScope(LCodeGen* codegen)
        : codegen_(codegen) {
      DCHECK(codegen_->info()->is_calling());
      DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
      codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;

      UseScratchRegisterScope temps(codegen_->masm_);
      // Preserve the value of lr, which must be saved on the stack (the call
      // to the stub will clobber it).
      Register to_be_pushed_lr =
          temps.UnsafeAcquire(StoreRegistersStateStub::to_be_pushed_lr());
      codegen_->masm_->Mov(to_be_pushed_lr, lr);
      StoreRegistersStateStub stub(codegen_->isolate());
      codegen_->masm_->CallStub(&stub);
    }

    ~PushSafepointRegistersScope() {
      DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
      RestoreRegistersStateStub stub(codegen_->isolate());
      codegen_->masm_->CallStub(&stub);
      codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
    }

   private:
    LCodeGen* codegen_;
  };
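
  // Typical use inside deferred code (a sketch; the registers and the runtime
  // entry are illustrative):
  //   {
  //     PushSafepointRegistersScope scope(this);
  //     __ Mov(x0, 0);
  //     __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
  //     RecordSafepointWithRegisters(instr->pointer_map(), 0,
  //                                  Safepoint::kNoLazyDeopt);
  //   }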

  friend class LDeferredCode;
  friend class SafepointGenerator;
  DISALLOW_COPY_AND_ASSIGN(LCodeGen);
};


class LDeferredCode: public ZoneObject {
 public:
  explicit LDeferredCode(LCodeGen* codegen)
      : codegen_(codegen),
        external_exit_(NULL),
        instruction_index_(codegen->current_instruction_) {
    codegen->AddDeferredCode(this);
  }

  virtual ~LDeferredCode() { }
  virtual void Generate() = 0;
  virtual LInstruction* instr() = 0;

  void SetExit(Label* exit) { external_exit_ = exit; }
  Label* entry() { return &entry_; }
  Label* exit() { return (external_exit_ != NULL) ? external_exit_ : &exit_; }
  int instruction_index() const { return instruction_index_; }

 protected:
  LCodeGen* codegen() const { return codegen_; }
  MacroAssembler* masm() const { return codegen_->masm(); }

 private:
  LCodeGen* codegen_;
  Label entry_;
  Label exit_;
  Label* external_exit_;
  int instruction_index_;
};
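
// A concrete subclass is typically declared locally in lithium-codegen-arm64.cc
// and trampolines back into LCodeGen. A minimal sketch, mirroring the
// DoDeferredNumberTagD hook declared above:
//
//   class DeferredNumberTagD: public LDeferredCode {
//    public:
//     DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
//         : LDeferredCode(codegen), instr_(instr) { }
//     virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
//     virtual LInstruction* instr() { return instr_; }
//    private:
//     LNumberTagD* instr_;
//   };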


// This is the abstract class used by EmitBranchGeneric.
// It is used to emit code for conditional branching. The Emit() function
// emits code to branch when the condition holds, and EmitInverted() emits
// the branch when the inverted condition is verified.
//
// For actual examples of conditions, see the concrete implementations in
// lithium-codegen-arm64.cc (e.g. BranchOnCondition, CompareAndBranch).
class BranchGenerator BASE_EMBEDDED {
 public:
  explicit BranchGenerator(LCodeGen* codegen)
    : codegen_(codegen) { }

  virtual ~BranchGenerator() { }

  virtual void Emit(Label* label) const = 0;
  virtual void EmitInverted(Label* label) const = 0;

 protected:
  MacroAssembler* masm() const { return codegen_->masm(); }

  LCodeGen* codegen_;
};
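
// A minimal concrete implementation might look like the following sketch
// (modelled on BranchOnCondition in lithium-codegen-arm64.cc):
//
//   class BranchOnCondition : public BranchGenerator {
//    public:
//     BranchOnCondition(LCodeGen* codegen, Condition cond)
//         : BranchGenerator(codegen), cond_(cond) { }
//     virtual void Emit(Label* label) const {
//       __ B(cond_, label);
//     }
//     virtual void EmitInverted(Label* label) const {
//       if (cond_ != al) __ B(NegateCondition(cond_), label);
//     }
//    private:
//     Condition cond_;
//   };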

} }  // namespace v8::internal

#endif  // V8_ARM64_LITHIUM_CODEGEN_ARM64_H_