V8 Project
code-stubs-mips64.h
// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_MIPS_CODE_STUBS_ARM_H_
#define V8_MIPS_CODE_STUBS_ARM_H_

namespace v8 {
namespace internal {


void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);

class StringHelper : public AllStatic {
 public:
  // Generate code for copying a large number of characters. This function
  // is allowed to spend extra time setting up conditions to make copying
  // faster. Copying of overlapping regions is not supported.
  // Dest register ends at the position after the last character written.
  static void GenerateCopyCharacters(MacroAssembler* masm,
                                     Register dest,
                                     Register src,
                                     Register count,
                                     Register scratch,
                                     String::Encoding encoding);

  // Compares two flat one-byte strings and returns result in v0.
  static void GenerateCompareFlatOneByteStrings(
      MacroAssembler* masm, Register left, Register right, Register scratch1,
      Register scratch2, Register scratch3, Register scratch4);

  // Compares two flat one-byte strings for equality and returns result in v0.
  static void GenerateFlatOneByteStringEquals(MacroAssembler* masm,
                                              Register left, Register right,
                                              Register scratch1,
                                              Register scratch2,
                                              Register scratch3);

 private:
  static void GenerateOneByteCharsCompareLoop(
      MacroAssembler* masm, Register left, Register right, Register length,
      Register scratch1, Register scratch2, Register scratch3,
      Label* chars_not_equal);

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
};


class StoreRegistersStateStub: public PlatformCodeStub {
 public:
  explicit StoreRegistersStateStub(Isolate* isolate)
      : PlatformCodeStub(isolate) {}

  static void GenerateAheadOfTime(Isolate* isolate);

 private:
  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
  DEFINE_PLATFORM_CODE_STUB(StoreRegistersState, PlatformCodeStub);
};


class RestoreRegistersStateStub: public PlatformCodeStub {
 public:
  explicit RestoreRegistersStateStub(Isolate* isolate)
      : PlatformCodeStub(isolate) {}

  static void GenerateAheadOfTime(Isolate* isolate);

 private:
  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
  DEFINE_PLATFORM_CODE_STUB(RestoreRegistersState, PlatformCodeStub);
};

// This stub can convert a signed int32 to a heap number (double). It does
// not work for int32s that are in Smi range! No GC occurs during this stub
// so you don't have to set up the frame.
class WriteInt32ToHeapNumberStub : public PlatformCodeStub {
 public:
  WriteInt32ToHeapNumberStub(Isolate* isolate, Register the_int,
                             Register the_heap_number, Register scratch,
                             Register scratch2)
      : PlatformCodeStub(isolate) {
    minor_key_ = IntRegisterBits::encode(the_int.code()) |
                 HeapNumberRegisterBits::encode(the_heap_number.code()) |
                 ScratchRegisterBits::encode(scratch.code()) |
                 SignRegisterBits::encode(scratch2.code());
    DCHECK(IntRegisterBits::is_valid(the_int.code()));
    DCHECK(HeapNumberRegisterBits::is_valid(the_heap_number.code()));
    DCHECK(ScratchRegisterBits::is_valid(scratch.code()));
    DCHECK(SignRegisterBits::is_valid(scratch2.code()));
  }

  static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);

 private:
  void Generate(MacroAssembler* masm);

  Register the_int() const {
    return Register::from_code(IntRegisterBits::decode(minor_key_));
  }

  Register the_heap_number() const {
    return Register::from_code(HeapNumberRegisterBits::decode(minor_key_));
  }

  Register scratch() const {
    return Register::from_code(ScratchRegisterBits::decode(minor_key_));
  }

  Register sign() const {
    return Register::from_code(SignRegisterBits::decode(minor_key_));
  }

  // Minor key encoding in 16 bits.
  class IntRegisterBits: public BitField<int, 0, 4> {};
  class HeapNumberRegisterBits: public BitField<int, 4, 4> {};
  class ScratchRegisterBits: public BitField<int, 8, 4> {};
  class SignRegisterBits: public BitField<int, 12, 4> {};

  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
  DEFINE_CODE_STUB(WriteInt32ToHeapNumber, PlatformCodeStub);
};


class RecordWriteStub: public PlatformCodeStub {
 public:
  RecordWriteStub(Isolate* isolate,
                  Register object,
                  Register value,
                  Register address,
                  RememberedSetAction remembered_set_action,
                  SaveFPRegsMode fp_mode)
      : PlatformCodeStub(isolate),
        regs_(object,   // An input reg.
              address,  // An input reg.
              value) {  // One scratch reg.
    minor_key_ = ObjectBits::encode(object.code()) |
                 ValueBits::encode(value.code()) |
                 AddressBits::encode(address.code()) |
                 RememberedSetActionBits::encode(remembered_set_action) |
                 SaveFPRegsModeBits::encode(fp_mode);
  }

  RecordWriteStub(uint32_t key, Isolate* isolate)
      : PlatformCodeStub(key, isolate), regs_(object(), address(), value()) {}

  enum Mode {
    STORE_BUFFER_ONLY,
    INCREMENTAL,
    INCREMENTAL_COMPACTION
  };

  virtual bool SometimesSetsUpAFrame() { return false; }

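  // The following two helpers rewrite the instruction at |pos| in place:
  // a "bne zero_reg, zero_reg" is never taken and therefore behaves like a
  // nop, while a "beq zero_reg, zero_reg" is always taken. The branch offset
  // encoded in the existing instruction is preserved.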
  static void PatchBranchIntoNop(MacroAssembler* masm, int pos) {
    const unsigned offset = masm->instr_at(pos) & kImm16Mask;
    masm->instr_at_put(pos, BNE | (zero_reg.code() << kRsShift) |
        (zero_reg.code() << kRtShift) | (offset & kImm16Mask));
    DCHECK(Assembler::IsBne(masm->instr_at(pos)));
  }

  static void PatchNopIntoBranch(MacroAssembler* masm, int pos) {
    const unsigned offset = masm->instr_at(pos) & kImm16Mask;
    masm->instr_at_put(pos, BEQ | (zero_reg.code() << kRsShift) |
        (zero_reg.code() << kRtShift) | (offset & kImm16Mask));
    DCHECK(Assembler::IsBeq(masm->instr_at(pos)));
  }

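  // The stub's mode is encoded in its first two instructions, which are
  // emitted as branch placeholders: a never-taken bne acts as a nop and an
  // always-taken beq as a branch. GetMode() inspects those two instructions,
  // and Patch() rewrites them to switch the stub between the three modes.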
  static Mode GetMode(Code* stub) {
    Instr first_instruction = Assembler::instr_at(stub->instruction_start());
    Instr second_instruction = Assembler::instr_at(stub->instruction_start() +
                                                   2 * Assembler::kInstrSize);

    if (Assembler::IsBeq(first_instruction)) {
      return INCREMENTAL;
    }

    DCHECK(Assembler::IsBne(first_instruction));

    if (Assembler::IsBeq(second_instruction)) {
      return INCREMENTAL_COMPACTION;
    }

    DCHECK(Assembler::IsBne(second_instruction));

    return STORE_BUFFER_ONLY;
  }

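  // Patch() rewrites the branch placeholders in place to put the stub into
  // the requested mode and then flushes the instruction cache so the patched
  // code takes effect.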
  static void Patch(Code* stub, Mode mode) {
    MacroAssembler masm(NULL,
                        stub->instruction_start(),
                        stub->instruction_size());
    switch (mode) {
      case STORE_BUFFER_ONLY:
        DCHECK(GetMode(stub) == INCREMENTAL ||
               GetMode(stub) == INCREMENTAL_COMPACTION);
        PatchBranchIntoNop(&masm, 0);
        PatchBranchIntoNop(&masm, 2 * Assembler::kInstrSize);
        break;
      case INCREMENTAL:
        DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
        PatchNopIntoBranch(&masm, 0);
        break;
      case INCREMENTAL_COMPACTION:
        DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
        PatchNopIntoBranch(&masm, 2 * Assembler::kInstrSize);
        break;
    }
    DCHECK(GetMode(stub) == mode);
    CpuFeatures::FlushICache(stub->instruction_start(),
                             4 * Assembler::kInstrSize);
  }

  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();

 private:
  // This is a helper class for freeing up 3 scratch registers. The input is
  // two registers that must be preserved and one scratch register provided by
  // the caller.
  class RegisterAllocation {
   public:
    RegisterAllocation(Register object,
                       Register address,
                       Register scratch0)
        : object_(object),
          address_(address),
          scratch0_(scratch0) {
      DCHECK(!AreAliased(scratch0, object, address, no_reg));
      scratch1_ = GetRegisterThatIsNotOneOf(object_, address_, scratch0_);
    }

    void Save(MacroAssembler* masm) {
      DCHECK(!AreAliased(object_, address_, scratch1_, scratch0_));
      // We don't have to save scratch0_ because it was given to us as
      // a scratch register.
      masm->push(scratch1_);
    }

    void Restore(MacroAssembler* masm) {
      masm->pop(scratch1_);
    }

    // If we have to call into C then we need to save and restore all caller-
    // saved registers that were not already preserved. The scratch registers
    // will be restored by other means so we don't bother pushing them here.
    void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
      masm->MultiPush((kJSCallerSaved | ra.bit()) & ~scratch1_.bit());
      if (mode == kSaveFPRegs) {
        masm->MultiPushFPU(kCallerSavedFPU);
      }
    }

    inline void RestoreCallerSaveRegisters(MacroAssembler* masm,
                                           SaveFPRegsMode mode) {
      if (mode == kSaveFPRegs) {
        masm->MultiPopFPU(kCallerSavedFPU);
      }
      masm->MultiPop((kJSCallerSaved | ra.bit()) & ~scratch1_.bit());
    }

    inline Register object() { return object_; }
    inline Register address() { return address_; }
    inline Register scratch0() { return scratch0_; }
    inline Register scratch1() { return scratch1_; }

   private:
    Register object_;
    Register address_;
    Register scratch0_;
    Register scratch1_;

    friend class RecordWriteStub;
  };

  enum OnNoNeedToInformIncrementalMarker {
    kReturnOnNoNeedToInformIncrementalMarker,
    kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
  };

  virtual inline Major MajorKey() const FINAL OVERRIDE { return RecordWrite; }

  virtual void Generate(MacroAssembler* masm) OVERRIDE;
  void GenerateIncremental(MacroAssembler* masm, Mode mode);
  void CheckNeedsToInformIncrementalMarker(
      MacroAssembler* masm,
      OnNoNeedToInformIncrementalMarker on_no_need,
      Mode mode);
  void InformIncrementalMarker(MacroAssembler* masm);

  void Activate(Code* code) {
    code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
  }

  Register object() const {
    return Register::from_code(ObjectBits::decode(minor_key_));
  }

  Register value() const {
    return Register::from_code(ValueBits::decode(minor_key_));
  }

  Register address() const {
    return Register::from_code(AddressBits::decode(minor_key_));
  }

  RememberedSetAction remembered_set_action() const {
    return RememberedSetActionBits::decode(minor_key_);
  }

  SaveFPRegsMode save_fp_regs_mode() const {
    return SaveFPRegsModeBits::decode(minor_key_);
  }

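  // Minor key layout: the object, value and address register codes take
  // 5 bits each, followed by one bit each for the remembered-set action and
  // the FP-save mode.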
  class ObjectBits: public BitField<int, 0, 5> {};
  class ValueBits: public BitField<int, 5, 5> {};
  class AddressBits: public BitField<int, 10, 5> {};
  class RememberedSetActionBits: public BitField<RememberedSetAction, 15, 1> {};
  class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 16, 1> {};

  Label slow_;
  RegisterAllocation regs_;

  DISALLOW_COPY_AND_ASSIGN(RecordWriteStub);
};


// Trampoline stub to call into native code. To call safely into native code
// in the presence of a compacting GC (which can move code objects), we need
// to keep the code that called into native code pinned in memory. Currently
// the simplest approach is to generate such a stub early enough that it can
// never be moved by the GC.
class DirectCEntryStub: public PlatformCodeStub {
 public:
  explicit DirectCEntryStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
  void GenerateCall(MacroAssembler* masm, Register target);

 private:
  bool NeedsImmovableCode() { return true; }

  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
  DEFINE_PLATFORM_CODE_STUB(DirectCEntry, PlatformCodeStub);
};


class NameDictionaryLookupStub: public PlatformCodeStub {
 public:
  enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };

  NameDictionaryLookupStub(Isolate* isolate, LookupMode mode)
      : PlatformCodeStub(isolate) {
    minor_key_ = LookupModeBits::encode(mode);
  }

  static void GenerateNegativeLookup(MacroAssembler* masm,
                                     Label* miss,
                                     Label* done,
                                     Register receiver,
                                     Register properties,
                                     Handle<Name> name,
                                     Register scratch0);

  static void GeneratePositiveLookup(MacroAssembler* masm,
                                     Label* miss,
                                     Label* done,
                                     Register elements,
                                     Register name,
                                     Register r0,
                                     Register r1);

  virtual bool SometimesSetsUpAFrame() { return false; }

 private:
  static const int kInlinedProbes = 4;
  static const int kTotalProbes = 20;

  static const int kCapacityOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kCapacityIndex * kPointerSize;

  static const int kElementsStartOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;

  LookupMode mode() const { return LookupModeBits::decode(minor_key_); }

  class LookupModeBits: public BitField<LookupMode, 0, 1> {};

  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
  DEFINE_PLATFORM_CODE_STUB(NameDictionaryLookup, PlatformCodeStub);
};


} }  // namespace v8::internal

#endif  // V8_MIPS_CODE_STUBS_ARM_H_