// NOTE: This file was recovered from a Doxygen "documentation of this file"
// page for the V8 project's x64 code-stubs header; navigation text removed.
1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #ifndef V8_X64_CODE_STUBS_X64_H_
6 #define V8_X64_CODE_STUBS_X64_H_
7 
8 namespace v8 {
9 namespace internal {
10 
11 
12 void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);
13 
14 
15 class StringHelper : public AllStatic {
16  public:
17  // Generate code for copying characters using the rep movs instruction.
18  // Copies rcx characters from rsi to rdi. Copying of overlapping regions is
19  // not supported.
21  Register dest,
22  Register src,
23  Register count,
24  String::Encoding encoding);
25 
26  // Compares two flat one-byte strings and returns result in rax.
28  MacroAssembler* masm, Register left, Register right, Register scratch1,
29  Register scratch2, Register scratch3, Register scratch4);
30 
31  // Compares two flat one-byte strings for equality and returns result in rax.
33  Register left, Register right,
34  Register scratch1,
35  Register scratch2);
36 
37  private:
39  MacroAssembler* masm, Register left, Register right, Register length,
40  Register scratch, Label* chars_not_equal,
41  Label::Distance near_jump = Label::kFar);
42 
44 };
45 
46 
48  public:
50 
53  : PlatformCodeStub(isolate) {
54  minor_key_ = DictionaryBits::encode(dictionary.code()) |
57  }
58 
60  Label* miss,
61  Label* done,
62  Register properties,
64  Register r0);
65 
67  Label* miss,
68  Label* done,
69  Register elements,
70  Register name,
71  Register r0,
72  Register r1);
73 
74  virtual bool SometimesSetsUpAFrame() { return false; }
75 
76  private:
77  static const int kInlinedProbes = 4;
78  static const int kTotalProbes = 20;
79 
80  static const int kCapacityOffset =
83 
84  static const int kElementsStartOffset =
87 
88  Register dictionary() const {
89  return Register::from_code(DictionaryBits::decode(minor_key_));
90  }
91 
92  Register result() const {
93  return Register::from_code(ResultBits::decode(minor_key_));
94  }
95 
96  Register index() const {
97  return Register::from_code(IndexBits::decode(minor_key_));
98  }
99 
100  LookupMode mode() const { return LookupModeBits::decode(minor_key_); }
101 
102  class DictionaryBits: public BitField<int, 0, 4> {};
103  class ResultBits: public BitField<int, 4, 4> {};
104  class IndexBits: public BitField<int, 8, 4> {};
105  class LookupModeBits: public BitField<LookupMode, 12, 1> {};
106 
109 };
110 
111 
112 class RecordWriteStub: public PlatformCodeStub {
113  public:
116  SaveFPRegsMode fp_mode)
117  : PlatformCodeStub(isolate),
118  regs_(object, // An input reg.
119  address, // An input reg.
120  value) { // One scratch reg.
121  minor_key_ = ObjectBits::encode(object.code()) |
126  }
127 
129  : PlatformCodeStub(key, isolate), regs_(object(), address(), value()) {}
130 
131  enum Mode {
133  INCREMENTAL,
135  };
136 
137  virtual bool SometimesSetsUpAFrame() { return false; }
138 
139  static const byte kTwoByteNopInstruction = 0x3c; // Cmpb al, #imm8.
140  static const byte kTwoByteJumpInstruction = 0xeb; // Jmp #imm8.
141 
142  static const byte kFiveByteNopInstruction = 0x3d; // Cmpl eax, #imm32.
143  static const byte kFiveByteJumpInstruction = 0xe9; // Jmp #imm32.
144 
145  static Mode GetMode(Code* stub) {
146  byte first_instruction = stub->instruction_start()[0];
147  byte second_instruction = stub->instruction_start()[2];
148 
149  if (first_instruction == kTwoByteJumpInstruction) {
150  return INCREMENTAL;
151  }
152 
153  DCHECK(first_instruction == kTwoByteNopInstruction);
154 
155  if (second_instruction == kFiveByteJumpInstruction) {
156  return INCREMENTAL_COMPACTION;
157  }
158 
159  DCHECK(second_instruction == kFiveByteNopInstruction);
160 
161  return STORE_BUFFER_ONLY;
162  }
163 
164  static void Patch(Code* stub, Mode mode) {
165  switch (mode) {
166  case STORE_BUFFER_ONLY:
167  DCHECK(GetMode(stub) == INCREMENTAL ||
171  break;
172  case INCREMENTAL:
173  DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
175  break;
177  DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
180  break;
181  }
182  DCHECK(GetMode(stub) == mode);
184  }
185 
187 
188  private:
189  // This is a helper class for freeing up 3 scratch registers, where the third
190  // is always rcx (needed for shift operations). The input is two registers
191  // that must be preserved and one scratch register provided by the caller.
192  class RegisterAllocation {
193  public:
197  : object_orig_(object),
200  object_(object),
201  address_(address),
203  DCHECK(!AreAliased(scratch0, object, address, no_reg));
205  if (scratch0.is(rcx)) {
207  }
208  if (object.is(rcx)) {
210  }
211  if (address.is(rcx)) {
213  }
215  }
216 
217  void Save(MacroAssembler* masm) {
223  // We don't have to save scratch0_orig_ because it was given to us as
224  // a scratch register. But if we had to switch to a different reg then
225  // we should save the new scratch0_.
226  if (!scratch0_.is(scratch0_orig_)) masm->Push(scratch0_);
227  if (!rcx.is(scratch0_orig_) &&
228  !rcx.is(object_orig_) &&
229  !rcx.is(address_orig_)) {
230  masm->Push(rcx);
231  }
232  masm->Push(scratch1_);
233  if (!address_.is(address_orig_)) {
234  masm->Push(address_);
235  masm->movp(address_, address_orig_);
236  }
237  if (!object_.is(object_orig_)) {
238  masm->Push(object_);
239  masm->movp(object_, object_orig_);
240  }
241  }
242 
243  void Restore(MacroAssembler* masm) {
244  // These will have been preserved the entire time, so we just need to move
245  // them back. Only in one case is the orig_ reg different from the plain
246  // one, since only one of them can alias with rcx.
247  if (!object_.is(object_orig_)) {
248  masm->movp(object_orig_, object_);
249  masm->Pop(object_);
250  }
251  if (!address_.is(address_orig_)) {
252  masm->movp(address_orig_, address_);
253  masm->Pop(address_);
254  }
255  masm->Pop(scratch1_);
256  if (!rcx.is(scratch0_orig_) &&
257  !rcx.is(object_orig_) &&
258  !rcx.is(address_orig_)) {
259  masm->Pop(rcx);
260  }
261  if (!scratch0_.is(scratch0_orig_)) masm->Pop(scratch0_);
262  }
263 
264  // If we have to call into C then we need to save and restore all caller-
265  // saved registers that were not already preserved.
266 
267  // The three scratch registers (incl. rcx) will be restored by other means
268  // so we don't bother pushing them here. Rbx, rbp and r12-15 are callee
269  // save and don't need to be preserved.
272  }
273 
277  }
278 
279  inline Register object() { return object_; }
280  inline Register address() { return address_; }
281  inline Register scratch0() { return scratch0_; }
282  inline Register scratch1() { return scratch1_; }
283 
284  private:
292  // Third scratch register is always rcx.
293 
295  Register r2,
296  Register r3) {
297  for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
299  if (candidate.is(rcx)) continue;
300  if (candidate.is(r1)) continue;
301  if (candidate.is(r2)) continue;
302  if (candidate.is(r3)) continue;
303  return candidate;
304  }
305  UNREACHABLE();
306  return no_reg;
307  }
308  friend class RecordWriteStub;
309  };
310 
314  };
315 
316  virtual Major MajorKey() const FINAL OVERRIDE { return RecordWrite; }
317 
318  virtual void Generate(MacroAssembler* masm) OVERRIDE;
321  MacroAssembler* masm,
323  Mode mode);
325 
326  void Activate(Code* code) {
328  }
329 
330  Register object() const {
331  return Register::from_code(ObjectBits::decode(minor_key_));
332  }
333 
334  Register value() const {
335  return Register::from_code(ValueBits::decode(minor_key_));
336  }
337 
338  Register address() const {
339  return Register::from_code(AddressBits::decode(minor_key_));
340  }
341 
343  return RememberedSetActionBits::decode(minor_key_);
344  }
345 
347  return SaveFPRegsModeBits::decode(minor_key_);
348  }
349 
350  class ObjectBits: public BitField<int, 0, 4> {};
351  class ValueBits: public BitField<int, 4, 4> {};
352  class AddressBits: public BitField<int, 8, 4> {};
353  class RememberedSetActionBits: public BitField<RememberedSetAction, 12, 1> {};
354  class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 13, 1> {};
355 
356  Label slow_;
357  RegisterAllocation regs_;
358 
360 };
361 
362 
363 } } // namespace v8::internal
364 
365 #endif // V8_X64_CODE_STUBS_X64_H_
void movp(Register dst, void *ptr, RelocInfo::Mode rmode)
static U encode(T value)
Definition: utils.h:217
static T decode(U value)
Definition: utils.h:228
byte * instruction_start()
Definition: objects-inl.h:6176
static void FlushICache(void *start, size_t size)
Source to read snapshot and builtins files from.
Definition: lithium-arm.h:372
static const int kHeaderSize
Definition: objects.h:2393
static const int kElementsStartIndex
Definition: objects.h:3274
static const int kCapacityIndex
Definition: objects.h:3272
Heap * GetHeap() const
Definition: objects-inl.h:1379
IncrementalMarking * incremental_marking()
Definition: heap.h:1205
void PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1=no_reg, Register exclusion2=no_reg, Register exclusion3=no_reg)
void PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1=no_reg, Register exclusion2=no_reg, Register exclusion3=no_reg)
static void GeneratePositiveLookup(MacroAssembler *masm, Label *miss, Label *done, Register elements, Register name, Register r0, Register r1)
NameDictionaryLookupStub(Isolate *isolate, Register dictionary, Register result, Register index, LookupMode mode)
DEFINE_PLATFORM_CODE_STUB(NameDictionaryLookup, PlatformCodeStub)
static void GenerateNegativeLookup(MacroAssembler *masm, Label *miss, Label *done, Register properties, Handle< Name > name, Register r0)
void SaveCallerSaveRegisters(MacroAssembler *masm, SaveFPRegsMode mode)
Register GetRegThatIsNotRcxOr(Register r1, Register r2, Register r3)
RegisterAllocation(Register object, Register address, Register scratch0)
void RestoreCallerSaveRegisters(MacroAssembler *masm, SaveFPRegsMode mode)
void GenerateIncremental(MacroAssembler *masm, Mode mode)
void InformIncrementalMarker(MacroAssembler *masm)
RememberedSetAction remembered_set_action() const
SaveFPRegsMode save_fp_regs_mode() const
RecordWriteStub(Isolate *isolate, Register object, Register value, Register address, RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode)
static const byte kTwoByteNopInstruction
RecordWriteStub(uint32_t key, Isolate *isolate)
virtual Major MajorKey() const FINAL OVERRIDE
static const byte kFiveByteJumpInstruction
void CheckNeedsToInformIncrementalMarker(MacroAssembler *masm, OnNoNeedToInformIncrementalMarker on_no_need, Mode mode)
DISALLOW_COPY_AND_ASSIGN(RecordWriteStub)
static const byte kFiveByteNopInstruction
static void Patch(Code *stub, Mode mode)
static const byte kTwoByteJumpInstruction
virtual bool SometimesSetsUpAFrame()
static Mode GetMode(Code *stub)
virtual void Generate(MacroAssembler *masm) OVERRIDE
static void GenerateCopyCharacters(MacroAssembler *masm, Register dest, Register src, Register count, String::Encoding encoding)
static void GenerateFlatOneByteStringEquals(MacroAssembler *masm, Register left, Register right, Register scratch1, Register scratch2)
DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper)
static void GenerateCompareFlatOneByteStrings(MacroAssembler *masm, Register left, Register right, Register scratch1, Register scratch2, Register scratch3, Register scratch4)
static void GenerateOneByteCharsCompareLoop(MacroAssembler *masm, Register left, Register right, Register length, Register scratch, Label *chars_not_equal, Label::Distance near_jump=Label::kFar)
#define OVERRIDE
enable harmony numeric enable harmony object literal extensions Optimize object Array DOM strings and string trace pretenuring decisions of HAllocate instructions Enables optimizations which favor memory size over execution speed maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining trace the tracking of allocation sites deoptimize every n garbage collections perform array bounds checks elimination analyze liveness of environment slots and zap dead values flushes the cache of optimized code for closures on every GC allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes enable context specialization in TurboFan execution budget before interrupt is triggered max percentage of megamorphic generic ICs to allow optimization enable use of SAHF instruction if enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable use of MLS instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long enable alignment of csp to bytes on platforms which prefer the register to always be expose gc extension under the specified name show built in functions in stack traces use random jit cookie to mask large constants minimum length for automatic enable preparsing CPU profiler sampling interval in microseconds trace out of bounds accesses to external arrays default size of stack region v8 is allowed to maximum length of function source code printed in a stack trace min size of a semi the new space consists of two semi spaces print one trace line following each garbage collection do not print 
trace line after scavenger collection print cumulative GC statistics in name
enable harmony numeric enable harmony object literal extensions Optimize object Array DOM strings and string trace pretenuring decisions of HAllocate instructions Enables optimizations which favor memory size over execution speed maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining trace the tracking of allocation sites deoptimize every n garbage collections perform array bounds checks elimination analyze liveness of environment slots and zap dead values flushes the cache of optimized code for closures on every GC allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes enable context specialization in TurboFan execution budget before interrupt is triggered max percentage of megamorphic generic ICs to allow optimization enable use of SAHF instruction if enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable use of MLS instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long mode(MIPS only)") DEFINE_BOOL(enable_always_align_csp
#define UNREACHABLE()
Definition: logging.h:30
#define DCHECK(condition)
Definition: logging.h:205
const int kPointerSize
Definition: globals.h:129
const Register r2
bool AreAliased(const CPURegister &reg1, const CPURegister &reg2, const CPURegister &reg3=NoReg, const CPURegister &reg4=NoReg, const CPURegister &reg5=NoReg, const CPURegister &reg6=NoReg, const CPURegister &reg7=NoReg, const CPURegister &reg8=NoReg)
const Register r0
const Register r3
void ArrayNativeCode(MacroAssembler *masm, Label *call_generic_code)
const Register r1
const Register no_reg
const Register rcx
Debugger support for the V8 JavaScript engine.
Definition: accessors.cc:20
static Register from_code(int code)
static Register FromAllocationIndex(int index)
bool is(Register reg) const
static int NumAllocatableRegisters()