code-stubs-arm64.h
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_ARM64_CODE_STUBS_ARM64_H_
#define V8_ARM64_CODE_STUBS_ARM64_H_

namespace v8 {
namespace internal {


void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);

class StringHelper : public AllStatic {
 public:
  // Compares two flat one-byte strings and returns the result in x0.
  static void GenerateCompareFlatOneByteStrings(
      MacroAssembler* masm, Register left, Register right, Register scratch1,
      Register scratch2, Register scratch3, Register scratch4);

  // Compares two flat one-byte strings for equality and returns the result
  // in x0.
  static void GenerateFlatOneByteStringEquals(MacroAssembler* masm,
                                              Register left, Register right,
                                              Register scratch1,
                                              Register scratch2,
                                              Register scratch3);

 private:
  static void GenerateOneByteCharsCompareLoop(
      MacroAssembler* masm, Register left, Register right, Register length,
      Register scratch1, Register scratch2, Label* chars_not_equal);

  DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
};


class StoreRegistersStateStub: public PlatformCodeStub {
 public:
  explicit StoreRegistersStateStub(Isolate* isolate)
      : PlatformCodeStub(isolate) {}

  static Register to_be_pushed_lr() { return ip0; }

  static void GenerateAheadOfTime(Isolate* isolate);

 private:
  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
  DEFINE_PLATFORM_CODE_STUB(StoreRegistersState, PlatformCodeStub);
};


class RestoreRegistersStateStub: public PlatformCodeStub {
 public:
  explicit RestoreRegistersStateStub(Isolate* isolate)
      : PlatformCodeStub(isolate) {}

  static void GenerateAheadOfTime(Isolate* isolate);

 private:
  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
  DEFINE_PLATFORM_CODE_STUB(RestoreRegistersState, PlatformCodeStub);
};


class RecordWriteStub: public PlatformCodeStub {
 public:
  // Stub to record the write of 'value' at 'address' in 'object'.
  // Typically 'address' = 'object' + <some offset>.
  // See MacroAssembler::RecordWriteField() for an example.
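  //
  // For instance (an illustrative sketch; 'offset' is a made-up field
  // offset, not something defined in this file): after a tagged field store
  // such as
  //   __ Str(value, FieldMemOperand(object, offset));
  // the macro assembler computes 'address' as 'object' + offset -
  // kHeapObjectTag and invokes this stub to update the write barrier.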
  RecordWriteStub(Isolate* isolate,
                  Register object,
                  Register value,
                  Register address,
                  RememberedSetAction remembered_set_action,
                  SaveFPRegsMode fp_mode)
      : PlatformCodeStub(isolate),
        regs_(object,   // An input reg.
              address,  // An input reg.
              value) {  // One scratch reg.
    DCHECK(object.Is64Bits());
    DCHECK(value.Is64Bits());
    DCHECK(address.Is64Bits());
    minor_key_ = ObjectBits::encode(object.code()) |
                 ValueBits::encode(value.code()) |
                 AddressBits::encode(address.code()) |
                 RememberedSetActionBits::encode(remembered_set_action) |
                 SaveFPRegsModeBits::encode(fp_mode);
  }

  RecordWriteStub(uint32_t key, Isolate* isolate)
      : PlatformCodeStub(key, isolate), regs_(object(), address(), value()) {}

  enum Mode {
    STORE_BUFFER_ONLY,
    INCREMENTAL,
    INCREMENTAL_COMPACTION
  };

  virtual bool SometimesSetsUpAFrame() { return false; }

  static Mode GetMode(Code* stub) {
    // Find the mode depending on the first two instructions.
    Instruction* instr1 =
        reinterpret_cast<Instruction*>(stub->instruction_start());
    Instruction* instr2 = instr1->following();

    if (instr1->IsUncondBranchImm()) {
      DCHECK(instr2->IsPCRelAddressing() && (instr2->Rd() == xzr.code()));
      return INCREMENTAL;
    }

    DCHECK(instr1->IsPCRelAddressing() && (instr1->Rd() == xzr.code()));

    if (instr2->IsUncondBranchImm()) {
      return INCREMENTAL_COMPACTION;
    }

    DCHECK(instr2->IsPCRelAddressing());

    return STORE_BUFFER_ONLY;
  }

  // We patch the first two instructions of the stub back and forth between
  // an adr and a branch when we start and stop incremental heap marking.
  // The branch is
  //   b label
  // The adr is
  //   adr xzr label
  // so it is effectively a nop.
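  //
  // The resulting first two instructions in each mode, matching the decoding
  // in GetMode() above (label names are illustrative):
  //   STORE_BUFFER_ONLY:       adr xzr, <incremental>   adr xzr, <compacting>
  //   INCREMENTAL:             b <incremental>          adr xzr, <compacting>
  //   INCREMENTAL_COMPACTION:  adr xzr, <incremental>   b <compacting>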
  static void Patch(Code* stub, Mode mode) {
    // We are going to patch the first two instructions of the stub.
    PatchingAssembler patcher(
        reinterpret_cast<Instruction*>(stub->instruction_start()), 2);
    Instruction* instr1 = patcher.InstructionAt(0);
    Instruction* instr2 = patcher.InstructionAt(kInstructionSize);
    // Instructions must be either 'adr' or 'b'.
    DCHECK(instr1->IsPCRelAddressing() || instr1->IsUncondBranchImm());
    DCHECK(instr2->IsPCRelAddressing() || instr2->IsUncondBranchImm());
    // Retrieve the offsets to the labels.
    int32_t offset_to_incremental_noncompacting = instr1->ImmPCOffset();
    int32_t offset_to_incremental_compacting = instr2->ImmPCOffset();

    switch (mode) {
      case STORE_BUFFER_ONLY:
        DCHECK(GetMode(stub) == INCREMENTAL ||
               GetMode(stub) == INCREMENTAL_COMPACTION);
        patcher.adr(xzr, offset_to_incremental_noncompacting);
        patcher.adr(xzr, offset_to_incremental_compacting);
        break;
      case INCREMENTAL:
        DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
        patcher.b(offset_to_incremental_noncompacting >> kInstructionSizeLog2);
        patcher.adr(xzr, offset_to_incremental_compacting);
        break;
      case INCREMENTAL_COMPACTION:
        DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
        patcher.adr(xzr, offset_to_incremental_noncompacting);
        patcher.b(offset_to_incremental_compacting >> kInstructionSizeLog2);
        break;
    }
    DCHECK(GetMode(stub) == mode);
  }

  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();

 private:
  // This is a helper class to manage the registers associated with the stub.
  // The 'object' and 'address' registers must be preserved.
  class RegisterAllocation {
   public:
    RegisterAllocation(Register object,
                       Register address,
                       Register scratch)
        : object_(object),
          address_(address),
          scratch0_(scratch),
          saved_regs_(kCallerSaved),
          saved_fp_regs_(kCallerSavedFP) {
      DCHECK(!AreAliased(scratch, object, address));

      // The SaveCallerSaveRegisters method needs to save caller-saved
      // registers, but we don't bother saving MacroAssembler scratch
      // registers.
      saved_regs_.Remove(MacroAssembler::DefaultTmpList());
      saved_fp_regs_.Remove(MacroAssembler::DefaultFPTmpList());

      // We would like to require more scratch registers for this stub,
      // but the number of registers comes down to the ones used in
      // FullCodeGen::SetVar(), which is architecture independent.
      // We allocate 2 extra scratch registers that we'll save on the stack.
      CPURegList pool_available = GetValidRegistersForAllocation();
      CPURegList used_regs(object, address, scratch);
      pool_available.Remove(used_regs);
      scratch1_ = Register(pool_available.PopLowestIndex());
      scratch2_ = Register(pool_available.PopLowestIndex());

      // The scratch registers will be restored by other means so we don't
      // need to save them with the other caller-saved registers.
      saved_regs_.Remove(scratch0_);
      saved_regs_.Remove(scratch1_);
      saved_regs_.Remove(scratch2_);
    }

    void Save(MacroAssembler* masm) {
      // We don't have to save scratch0_ because it was given to us as
      // a scratch register.
      masm->Push(scratch1_, scratch2_);
    }

    void Restore(MacroAssembler* masm) {
      masm->Pop(scratch2_, scratch1_);
    }

    // If we have to call into C then we need to save and restore all caller-
    // saved registers that were not already preserved.
    void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
      // TODO(all): This can be very expensive, and it is likely that not every
      // register will need to be preserved. Can we improve this?
      masm->PushCPURegList(saved_regs_);
      if (mode == kSaveFPRegs) {
        masm->PushCPURegList(saved_fp_regs_);
      }
    }

    void RestoreCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
      // TODO(all): This can be very expensive, and it is likely that not every
      // register will need to be preserved. Can we improve this?
      if (mode == kSaveFPRegs) {
        masm->PopCPURegList(saved_fp_regs_);
      }
      masm->PopCPURegList(saved_regs_);
    }

    Register object() { return object_; }
    Register address() { return address_; }
    Register scratch0() { return scratch0_; }
    Register scratch1() { return scratch1_; }
    Register scratch2() { return scratch2_; }

   private:
    Register object_;
    Register address_;
    Register scratch0_;
    Register scratch1_;
    Register scratch2_;
    CPURegList saved_regs_;
    CPURegList saved_fp_regs_;

    // TODO(all): We should consider moving this somewhere else.
    static CPURegList GetValidRegistersForAllocation() {
      // The list of valid registers for allocation is defined as all the
      // registers without those with a special meaning.
      //
      // The default list excludes registers x26 to x31 because they are
      // reserved for the following purposes:
      //  - x26 root register
      //  - x27 context pointer register
      //  - x28 jssp
      //  - x29 frame pointer
      //  - x30 link register (lr)
      //  - x31 xzr/stack pointer
      CPURegList list(CPURegister::kRegister, kXRegSizeInBits, 0, 25);

      // We also remove MacroAssembler's scratch registers.
      list.Remove(MacroAssembler::DefaultTmpList());

      return list;
    }

    friend class RecordWriteStub;
  };

  enum OnNoNeedToInformIncrementalMarker {
    kReturnOnNoNeedToInformIncrementalMarker,
    kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
  };

  virtual inline Major MajorKey() const FINAL OVERRIDE { return RecordWrite; }

  virtual void Generate(MacroAssembler* masm) OVERRIDE;
  void GenerateIncremental(MacroAssembler* masm, Mode mode);
  void CheckNeedsToInformIncrementalMarker(
      MacroAssembler* masm,
      OnNoNeedToInformIncrementalMarker on_no_need,
      Mode mode);
  void InformIncrementalMarker(MacroAssembler* masm);

  void Activate(Code* code) {
    code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
  }

  Register object() const {
    return Register::from_code(ObjectBits::decode(minor_key_));
  }

  Register value() const {
    return Register::from_code(ValueBits::decode(minor_key_));
  }

  Register address() const {
    return Register::from_code(AddressBits::decode(minor_key_));
  }

  RememberedSetAction remembered_set_action() const {
    return RememberedSetActionBits::decode(minor_key_);
  }

  SaveFPRegsMode save_fp_regs_mode() const {
    return SaveFPRegsModeBits::decode(minor_key_);
  }

  class ObjectBits: public BitField<int, 0, 5> {};
  class ValueBits: public BitField<int, 5, 5> {};
  class AddressBits: public BitField<int, 10, 5> {};
  class RememberedSetActionBits: public BitField<RememberedSetAction, 15, 1> {};
  class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 16, 1> {};
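  // Taken together, these fields give the following minor_key_ layout
  // (17 bits used in total; this is just a summary of the BitField
  // declarations above):
  //   bits 0-4:   code of the 'object' register
  //   bits 5-9:   code of the 'value' register
  //   bits 10-14: code of the 'address' register
  //   bit 15:     RememberedSetAction
  //   bit 16:     SaveFPRegsMode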

  Label slow_;
  RegisterAllocation regs_;
};


// Helper to call C++ functions from generated code. The caller must prepare
// the exit frame before doing the call with GenerateCall.
class DirectCEntryStub: public PlatformCodeStub {
 public:
  explicit DirectCEntryStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
  void GenerateCall(MacroAssembler* masm, Register target);
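
  // A note on the design: NeedsImmovableCode() below returns true, so this
  // stub's code object is never moved by the GC. The return address for the
  // C++ call therefore points into immovable code and stays valid even if
  // the code object that requested the call is relocated during the call.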

 private:
  bool NeedsImmovableCode() { return true; }

  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
  DEFINE_PLATFORM_CODE_STUB(DirectCEntry, PlatformCodeStub);
};


class NameDictionaryLookupStub: public PlatformCodeStub {
 public:
  enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };

  NameDictionaryLookupStub(Isolate* isolate, LookupMode mode)
      : PlatformCodeStub(isolate) {
    minor_key_ = LookupModeBits::encode(mode);
  }

  static void GenerateNegativeLookup(MacroAssembler* masm,
                                     Label* miss,
                                     Label* done,
                                     Register receiver,
                                     Register properties,
                                     Handle<Name> name,
                                     Register scratch0);

  static void GeneratePositiveLookup(MacroAssembler* masm,
                                     Label* miss,
                                     Label* done,
                                     Register elements,
                                     Register name,
                                     Register scratch1,
                                     Register scratch2);

  virtual bool SometimesSetsUpAFrame() { return false; }

 private:
  static const int kInlinedProbes = 4;
  static const int kTotalProbes = 20;

  static const int kCapacityOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kCapacityIndex * kPointerSize;

  static const int kElementsStartOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;

  LookupMode mode() const { return LookupModeBits::decode(minor_key_); }

  class LookupModeBits: public BitField<LookupMode, 0, 1> {};

  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
  DEFINE_PLATFORM_CODE_STUB(NameDictionaryLookup, PlatformCodeStub);
};

} }  // namespace v8::internal

#endif  // V8_ARM64_CODE_STUBS_ARM64_H_