register-allocator.h
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_REGISTER_ALLOCATOR_H_
#define V8_REGISTER_ALLOCATOR_H_

#include "src/allocation.h"
#include "src/compiler/instruction.h"
#include "src/compiler/node.h"
#include "src/compiler/schedule.h"
#include "src/macro-assembler.h"
#include "src/zone.h"

namespace v8 {
namespace internal {

// Forward declarations.
class BitVector;
class InstructionOperand;
class UnallocatedOperand;
class ParallelMove;
class PointerMap;

namespace compiler {

enum RegisterKind {
  UNALLOCATED_REGISTERS,
  GENERAL_REGISTERS,
  DOUBLE_REGISTERS
};


// This class represents a single point of an InstructionOperand's lifetime.
// For each instruction there are exactly two lifetime positions: the
// beginning and the end of the instruction. Lifetime positions for different
// instructions are disjoint.
class LifetimePosition {
 public:
  // Returns the lifetime position that corresponds to the beginning of
  // the instruction with the given index.
  static LifetimePosition FromInstructionIndex(int index) {
    return LifetimePosition(index * kStep);
  }

  // Returns a numeric representation of this lifetime position.
  int Value() const { return value_; }

  // Returns the index of the instruction to which this lifetime position
  // corresponds.
  int InstructionIndex() const {
    DCHECK(IsValid());
    return value_ / kStep;
  }

  // Returns true if this lifetime position corresponds to the instruction
  // start.
  bool IsInstructionStart() const { return (value_ & (kStep - 1)) == 0; }

  // Returns the lifetime position for the start of the instruction which
  // corresponds to this lifetime position.
  LifetimePosition InstructionStart() const {
    DCHECK(IsValid());
    return LifetimePosition(value_ & ~(kStep - 1));
  }

  // Returns the lifetime position for the end of the instruction which
  // corresponds to this lifetime position.
  LifetimePosition InstructionEnd() const {
    DCHECK(IsValid());
    return LifetimePosition(InstructionStart().Value() + kStep / 2);
  }

  // Returns the lifetime position for the beginning of the next instruction.
  LifetimePosition NextInstruction() const {
    DCHECK(IsValid());
    return LifetimePosition(InstructionStart().Value() + kStep);
  }

  // Returns the lifetime position for the beginning of the previous
  // instruction.
  LifetimePosition PrevInstruction() const {
    DCHECK(IsValid());
    DCHECK(value_ > 1);
    return LifetimePosition(InstructionStart().Value() - kStep);
  }

  // Constructs a lifetime position which does not correspond to any
  // instruction.
  LifetimePosition() : value_(-1) {}

  // Returns true if this lifetime position corresponds to some
  // instruction.
  bool IsValid() const { return value_ != -1; }

  static inline LifetimePosition Invalid() { return LifetimePosition(); }

  static inline LifetimePosition MaxPosition() {
    // We have to use this kind of getter instead of a static member due to
    // a crash bug in GDB.
    return LifetimePosition(kMaxInt);
  }

 private:
  static const int kStep = 2;

  // Code relies on kStep being a power of two.
  STATIC_ASSERT(IS_POWER_OF_TWO(kStep));

  explicit LifetimePosition(int value) : value_(value) {}

  int value_;
};
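
// Example (editor's sketch, not part of the original header): with kStep == 2,
// instruction i occupies positions 2 * i (its start) and 2 * i + 1 (its end).
//
//   LifetimePosition p = LifetimePosition::FromInstructionIndex(3);
//   p.Value();                    // 6
//   p.IsInstructionStart();       // true
//   p.InstructionEnd().Value();   // 7
//   p.NextInstruction().Value();  // 8, the start of instruction 4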

// Representation of the non-empty interval [start, end[.
class UseInterval : public ZoneObject {
 public:
  UseInterval(LifetimePosition start, LifetimePosition end)
      : start_(start), end_(end), next_(NULL) {
    DCHECK(start.Value() < end.Value());
  }

  LifetimePosition start() const { return start_; }
  LifetimePosition end() const { return end_; }
  UseInterval* next() const { return next_; }

  // Split this interval at the given position without affecting the
  // live range that owns it. The interval must contain the position.
  void SplitAt(LifetimePosition pos, Zone* zone);

  // If this interval intersects with other, returns the smallest position
  // that belongs to both of them.
  LifetimePosition Intersect(const UseInterval* other) const {
    if (other->start().Value() < start_.Value()) return other->Intersect(this);
    if (other->start().Value() < end_.Value()) return other->start();
    return LifetimePosition::Invalid();
  }

  bool Contains(LifetimePosition point) const {
    return start_.Value() <= point.Value() && point.Value() < end_.Value();
  }

  void set_start(LifetimePosition start) { start_ = start; }
  void set_next(UseInterval* next) { next_ = next; }

  LifetimePosition start_;
  LifetimePosition end_;
  UseInterval* next_;
};
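
// Example (editor's sketch, not part of the original header): two intervals
// intersect iff the later-starting one begins before the earlier one ends;
// the first common position is then that later start.
//
//   UseInterval a(LifetimePosition::FromInstructionIndex(1),
//                 LifetimePosition::FromInstructionIndex(4));  // [2, 8[
//   UseInterval b(LifetimePosition::FromInstructionIndex(3),
//                 LifetimePosition::FromInstructionIndex(6));  // [6, 12[
//   a.Intersect(&b).Value();                                   // returns 6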

// Representation of a use position.
class UsePosition : public ZoneObject {
 public:
  UsePosition(LifetimePosition pos, InstructionOperand* operand,
              InstructionOperand* hint);

  InstructionOperand* operand() const { return operand_; }
  bool HasOperand() const { return operand_ != NULL; }

  InstructionOperand* hint() const { return hint_; }
  bool HasHint() const;
  bool RequiresRegister() const;
  bool RegisterIsBeneficial() const;

  LifetimePosition pos() const { return pos_; }
  UsePosition* next() const { return next_; }

  void set_next(UsePosition* next) { next_ = next; }

  InstructionOperand* const operand_;
  InstructionOperand* const hint_;
  LifetimePosition const pos_;
  UsePosition* next_;
  bool requires_reg_;
  bool register_beneficial_;
};
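
// Example (editor's sketch, not part of the original header): use positions
// form a singly linked list ordered by position; a typical scan (compare
// LiveRange::NextUsePosition below) skips uses before some start position.
//
//   UsePosition* use = range->first_pos();  // assumes a LiveRange* range
//   while (use != NULL && use->pos().Value() < start.Value()) {
//     use = use->next();
//   }
//   // use is now the first use at or after start, or NULL if none exists.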

// Representation of SSA values' live ranges as a collection of (continuous)
// intervals over the instruction ordering.
class LiveRange : public ZoneObject {
 public:
  static const int kInvalidAssignment = 0x7fffffff;

  LiveRange(int id, Zone* zone);

  UseInterval* first_interval() const { return first_interval_; }
  UsePosition* first_pos() const { return first_pos_; }
  LiveRange* parent() const { return parent_; }
  LiveRange* TopLevel() { return (parent_ == NULL) ? this : parent_; }
  LiveRange* next() const { return next_; }
  bool IsChild() const { return parent() != NULL; }
  int id() const { return id_; }
  bool IsFixed() const { return id_ < 0; }
  bool IsEmpty() const { return first_interval() == NULL; }
  InstructionOperand* CreateAssignedOperand(Zone* zone);
  int assigned_register() const { return assigned_register_; }
  int spill_start_index() const { return spill_start_index_; }
  void set_assigned_register(int reg, Zone* zone);
  void MakeSpilled(Zone* zone);
  bool is_phi() const { return is_phi_; }
  void set_is_phi(bool is_phi) { is_phi_ = is_phi; }
  bool is_non_loop_phi() const { return is_non_loop_phi_; }
  void set_is_non_loop_phi(bool is_non_loop_phi) {
    is_non_loop_phi_ = is_non_loop_phi;
  }

  // Returns the use position in this live range that follows both start
  // and the last processed use position.
  // Modifies the internal state of the live range!
  UsePosition* NextUsePosition(LifetimePosition start);

  // Returns the use position for which a register is required in this live
  // range and which follows both start and the last processed use position.
  // Modifies the internal state of the live range!
  UsePosition* NextRegisterPosition(LifetimePosition start);

  // Returns the use position for which a register is beneficial in this live
  // range and which follows both start and the last processed use position.
  // Modifies the internal state of the live range!
  UsePosition* NextUsePositionRegisterIsBeneficial(LifetimePosition start);

  // Returns the use position for which a register is beneficial in this live
  // range and which precedes start.
  UsePosition* PreviousUsePositionRegisterIsBeneficial(LifetimePosition start);

  // Can this live range be spilled at this position?
  bool CanBeSpilled(LifetimePosition pos);

  // Split this live range at the given position, which must follow the start
  // of the range.
  // All uses following the given position will be moved from this
  // live range to the result live range.
  void SplitAt(LifetimePosition position, LiveRange* result, Zone* zone);
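
  // Example (editor's sketch, not part of the original header): splitting a
  // range that covers [2, 12[ at position 6 leaves this range with [2, 6[ and
  // hands [6, 12[, together with the uses after position 6, to result.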

  RegisterKind Kind() const { return kind_; }
  bool HasRegisterAssigned() const {
    return assigned_register_ != kInvalidAssignment;
  }
  bool IsSpilled() const { return spilled_; }

  InstructionOperand* current_hint_operand() const {
    DCHECK(current_hint_operand_ == FirstHint());
    return current_hint_operand_;
  }
  InstructionOperand* FirstHint() const {
    UsePosition* pos = first_pos_;
    while (pos != NULL && !pos->HasHint()) pos = pos->next();
    if (pos != NULL) return pos->hint();
    return NULL;
  }

  LifetimePosition Start() const {
    DCHECK(!IsEmpty());
    return first_interval()->start();
  }

  LifetimePosition End() const {
    DCHECK(!IsEmpty());
    return last_interval_->end();
  }

  bool HasAllocatedSpillOperand() const;
  InstructionOperand* GetSpillOperand() const { return spill_operand_; }
  void SetSpillOperand(InstructionOperand* operand);

  void SetSpillStartIndex(int start) {
    spill_start_index_ = Min(start, spill_start_index_);
  }

  bool ShouldBeAllocatedBefore(const LiveRange* other) const;
  bool CanCover(LifetimePosition position) const;
  bool Covers(LifetimePosition position);
  LifetimePosition FirstIntersection(LiveRange* other);

  // Add a new interval or a new use position to this live range.
  void EnsureInterval(LifetimePosition start, LifetimePosition end, Zone* zone);
  void AddUseInterval(LifetimePosition start, LifetimePosition end, Zone* zone);
  void AddUsePosition(LifetimePosition pos, InstructionOperand* operand,
                      InstructionOperand* hint, Zone* zone);

  // Shorten the most recently added interval by setting a new start.
  void ShortenTo(LifetimePosition start);

#ifdef DEBUG
  // True if target overlaps an existing interval.
  bool HasOverlap(UseInterval* target) const;
  void Verify() const;
#endif

 private:
  void ConvertOperands(Zone* zone);
  UseInterval* FirstSearchIntervalForPosition(LifetimePosition position) const;
  void AdvanceLastProcessedMarker(UseInterval* to_start_of,
                                  LifetimePosition but_not_past) const;

  int id_;
  bool spilled_;
  bool is_phi_;
  bool is_non_loop_phi_;
  RegisterKind kind_;
  int assigned_register_;
  UseInterval* last_interval_;
  UseInterval* first_interval_;
  UsePosition* first_pos_;
  LiveRange* parent_;
  LiveRange* next_;
  // This is used as a cache, it doesn't affect correctness.
  mutable UseInterval* current_interval_;
  UsePosition* last_processed_use_;
  // This is used as a cache, it's invalid outside of BuildLiveRanges.
  InstructionOperand* current_hint_operand_;
  InstructionOperand* spill_operand_;
  int spill_start_index_;

  friend class RegisterAllocator;  // Assigns to kind_.
};
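
// Example (editor's sketch, not part of the original header): a range is
// typically queried for coverage before deciding where to split or spill.
//
//   if (!range->IsEmpty() && range->Covers(pos)) {
//     // pos lies inside one of the range's use intervals.
//   }
//   range->Start();  // first position covered by the range
//   range->End();    // position one past the last covered position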


class RegisterAllocator BASE_EMBEDDED {
 public:
  explicit RegisterAllocator(InstructionSequence* code);

  static void TraceAlloc(const char* msg, ...);

  // Checks whether the value of a given virtual register is a reference.
  // TODO(titzer): rename this to IsReference.
  bool HasTaggedValue(int virtual_register) const;

  // Returns the register kind required by the given virtual register.
  RegisterKind RequiredRegisterKind(int virtual_register) const;

  bool Allocate();

  const ZoneList<LiveRange*>* live_ranges() const { return &live_ranges_; }
  const Vector<LiveRange*>* fixed_live_ranges() const {
    return &fixed_live_ranges_;
  }
  const Vector<LiveRange*>* fixed_double_live_ranges() const {
    return &fixed_double_live_ranges_;
  }

  inline InstructionSequence* code() const { return code_; }

  // This zone is for data structures only needed during register allocation.
  inline Zone* zone() { return &zone_; }

  // This zone is for InstructionOperands and moves that live beyond register
  // allocation.
  inline Zone* code_zone() { return code()->zone(); }

  int GetVirtualRegister() {
    int vreg = code()->NextVirtualRegister();
    if (vreg >= UnallocatedOperand::kMaxVirtualRegisters) {
      allocation_ok_ = false;
      // Maintain the invariant that we return something below the maximum.
      return 0;
    }
    return vreg;
  }

  bool AllocationOk() { return allocation_ok_; }
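
  // Example (editor's sketch, not part of the original header): rather than
  // failing outright, GetVirtualRegister() sets a failure flag, so callers
  // bail out by checking AllocationOk() afterwards:
  //
  //   int vreg = allocator->GetVirtualRegister();
  //   if (!allocator->AllocationOk()) return;  // vreg budget exhausted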

#ifdef DEBUG
  void Verify() const;
#endif

  BitVector* assigned_registers() { return assigned_registers_; }
  BitVector* assigned_double_registers() { return assigned_double_registers_; }

 private:
  void MeetRegisterConstraints();
  void ResolvePhis();
  void BuildLiveRanges();
  void AllocateGeneralRegisters();
  void AllocateDoubleRegisters();
  void ConnectRanges();
  void ResolveControlFlow();
  void PopulatePointerMaps();  // TODO(titzer): rename to PopulateReferenceMaps.
  void AllocateRegisters();
  bool CanEagerlyResolveControlFlow(BasicBlock* block) const;
  inline bool SafePointsAreInOrder() const;

  // Liveness analysis support.
  void InitializeLivenessAnalysis();
  BitVector* ComputeLiveOut(BasicBlock* block);
  void AddInitialIntervals(BasicBlock* block, BitVector* live_out);
  bool IsOutputRegisterOf(Instruction* instr, int index);
  bool IsOutputDoubleRegisterOf(Instruction* instr, int index);
  void ProcessInstructions(BasicBlock* block, BitVector* live);
  void MeetRegisterConstraints(BasicBlock* block);
  void MeetConstraintsBetween(Instruction* first, Instruction* second,
                              int gap_index);
  void MeetRegisterConstraintsForLastInstructionInBlock(BasicBlock* block);
  void ResolvePhis(BasicBlock* block);

  // Helper methods for building intervals.
  InstructionOperand* AllocateFixed(UnallocatedOperand* operand, int pos,
                                    bool is_tagged);
  LiveRange* LiveRangeFor(InstructionOperand* operand);
  void Define(LifetimePosition position, InstructionOperand* operand,
              InstructionOperand* hint);
  void Use(LifetimePosition block_start, LifetimePosition position,
           InstructionOperand* operand, InstructionOperand* hint);
  void AddConstraintsGapMove(int index, InstructionOperand* from,
                             InstructionOperand* to);

  // Helper methods for updating the live range lists.
  void AddToActive(LiveRange* range);
  void AddToInactive(LiveRange* range);
  void AddToUnhandledSorted(LiveRange* range);
  void AddToUnhandledUnsorted(LiveRange* range);
  void SortUnhandled();
  bool UnhandledIsSorted();
  void ActiveToHandled(LiveRange* range);
  void ActiveToInactive(LiveRange* range);
  void InactiveToHandled(LiveRange* range);
  void InactiveToActive(LiveRange* range);
  void FreeSpillSlot(LiveRange* range);
  InstructionOperand* TryReuseSpillSlot(LiveRange* range);

  // Helper methods for allocating registers.
  bool TryAllocateFreeReg(LiveRange* range);
  void AllocateBlockedReg(LiveRange* range);

  // Live range splitting helpers.

  // Split the given range at the given position.
  // If the range starts at or after the given position then the
  // original range is returned.
  // Otherwise returns the live range that starts at pos and contains
  // all uses from the original range that follow pos. Uses at pos will
  // still be owned by the original range after splitting.
  LiveRange* SplitRangeAt(LiveRange* range, LifetimePosition pos);

  // Split the given range at a position from the interval [start, end].
  LiveRange* SplitBetween(LiveRange* range, LifetimePosition start,
                          LifetimePosition end);

  // Find a lifetime position in the interval [start, end] which
  // is optimal for splitting: it is either the header of the outermost
  // loop covered by this interval or the latest possible position.
  LifetimePosition FindOptimalSplitPos(LifetimePosition start,
                                       LifetimePosition end);

  // Spill the given live range after position pos.
  void SpillAfter(LiveRange* range, LifetimePosition pos);

  // Spill the given live range after position [start] and up to position
  // [end].
  void SpillBetween(LiveRange* range, LifetimePosition start,
                    LifetimePosition end);

  // Spill the given live range after position [start] and up to position
  // [end]. The range is guaranteed to be spilled at least until position
  // [until].
  void SpillBetweenUntil(LiveRange* range, LifetimePosition start,
                         LifetimePosition until, LifetimePosition end);

  void SplitAndSpillIntersecting(LiveRange* range);

  // If we are trying to spill a range inside a loop, try to
  // hoist the spill position out to the point just before the loop.
  LifetimePosition FindOptimalSpillingPos(LiveRange* range,
                                          LifetimePosition pos);

  void Spill(LiveRange* range);
  bool IsBlockBoundary(LifetimePosition pos);

  // Helper methods for resolving control flow.
  void ResolveControlFlow(LiveRange* range, BasicBlock* block,
                          BasicBlock* pred);

  inline void SetLiveRangeAssignedRegister(LiveRange* range, int reg);

  // Return the parallel move that should be used to connect ranges split at
  // the given position.
  ParallelMove* GetConnectingParallelMove(LifetimePosition pos);

  // Return the block which contains the given lifetime position.
  BasicBlock* GetBlock(LifetimePosition pos);

  // Helper methods for the fixed registers.
  int RegisterCount() const;
  static int FixedLiveRangeID(int index) { return -index - 1; }
  static int FixedDoubleLiveRangeID(int index);
  LiveRange* FixedLiveRangeFor(int index);
  LiveRange* FixedDoubleLiveRangeFor(int index);
  LiveRange* LiveRangeFor(int index);
  GapInstruction* GetLastGap(BasicBlock* block);

  const char* RegisterName(int allocation_index);

  inline Instruction* InstructionAt(int index) {
    return code()->InstructionAt(index);
  }

  Zone zone_;
  InstructionSequence* code_;

  // During liveness analysis keep a mapping from block id to live_in sets
  // for blocks already analyzed.
  ZoneList<BitVector*> live_in_sets_;

  // Liveness analysis results.
  ZoneList<LiveRange*> live_ranges_;

  // Lists of live ranges.
  EmbeddedVector<LiveRange*, Register::kMaxNumAllocatableRegisters>
      fixed_live_ranges_;
  EmbeddedVector<LiveRange*, DoubleRegister::kMaxNumAllocatableRegisters>
      fixed_double_live_ranges_;
  ZoneList<LiveRange*> unhandled_live_ranges_;
  ZoneList<LiveRange*> active_live_ranges_;
  ZoneList<LiveRange*> inactive_live_ranges_;
  ZoneList<LiveRange*> reusable_slots_;

  RegisterKind mode_;
  int num_registers_;

  BitVector* assigned_registers_;
  BitVector* assigned_double_registers_;

  // Indicates success or failure during register allocation.
  bool allocation_ok_;

#ifdef DEBUG
  LifetimePosition allocation_finger_;
#endif

  DISALLOW_COPY_AND_ASSIGN(RegisterAllocator);
};
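
// Example (editor's sketch, not part of the original header): typical driving
// code, assuming an InstructionSequence* sequence built by instruction
// selection.
//
//   RegisterAllocator allocator(sequence);
//   if (!allocator.Allocate()) {
//     // Allocation failed (e.g. the virtual register budget was exhausted);
//     // the compilation must bail out.
//   }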


class RegisterAllocatorPhase : public CompilationPhase {
 public:
  RegisterAllocatorPhase(const char* name, RegisterAllocator* allocator);
  ~RegisterAllocatorPhase();

 private:
  RegisterAllocator* allocator_;
  unsigned allocator_zone_start_allocation_size_;

  DISALLOW_COPY_AND_ASSIGN(RegisterAllocatorPhase);
};

}
}
}  // namespace v8::internal::compiler

#endif  // V8_REGISTER_ALLOCATOR_H_