// V8 Project — assembler-arm64.h
// (Recovered from a documentation-viewer extract; navigation chrome removed.
// Embedded numbers at the start of each line are the original source line
// numbers from the viewer; gaps in that numbering mark lines lost in the
// extraction.)
1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #ifndef V8_ARM64_ASSEMBLER_ARM64_H_
6 #define V8_ARM64_ASSEMBLER_ARM64_H_
7 
8 #include <list>
9 #include <map>
10 #include <vector>
11 
13 #include "src/assembler.h"
14 #include "src/globals.h"
15 #include "src/serialize.h"
16 #include "src/utils.h"
17 
18 
19 namespace v8 {
20 namespace internal {
21 
22 
23 // -----------------------------------------------------------------------------
24 // Registers.
// Applies the macro R once for each register code 0..31.
25 #define REGISTER_CODE_LIST(R) \
26 R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \
27 R(8) R(9) R(10) R(11) R(12) R(13) R(14) R(15) \
28 R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23) \
29 R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31)
30 
31 
// Width of a RegList bit mask, in bits (one bit per register).
32 static const int kRegListSizeInBits = sizeof(RegList) * kBitsPerByte;
33 
34 
35 // Some CPURegister methods can return Register and FPRegister types, so we
36 // need to declare them in advance.
37 struct Register;
38 struct FPRegister;
39 
40 
// POD descriptor of an ARM64 register: an encoding (reg_code), a width in
// bits (reg_size) and a kind (reg_type). Kept as a plain aggregate (no
// constructors) so statically initialized register constants are safely
// zero-initialized before any code runs.
// NOTE(review): this extract is missing original lines 46-48 (the non-kInvalid
// RegisterType enumerators) and line 90 (the reg_type data member implied by
// type() and the aggregate initializer below).
41 struct CPURegister {
42  enum RegisterType {
43  // The kInvalid value is used to detect uninitialized static instances,
44  // which are always zero-initialized before any constructors are called.
45  kInvalid = 0,
49  };
50 
 // Factory used in place of a constructor; aggregate initialization keeps
 // the struct POD.
51  static CPURegister Create(unsigned code, unsigned size, RegisterType type) {
52  CPURegister r = {code, size, type};
53  return r;
54  }
55 
56  unsigned code() const;
57  RegisterType type() const;
 // Single-bit RegList mask for this register.
58  RegList Bit() const;
59  unsigned SizeInBits() const;
60  int SizeInBytes() const;
61  bool Is32Bits() const;
62  bool Is64Bits() const;
63  bool IsValid() const;
64  bool IsValidOrNone() const;
65  bool IsValidRegister() const;
66  bool IsValidFPRegister() const;
67  bool IsNone() const;
 // Is(): exact match. Aliases(): presumably same code/type irrespective of
 // width — confirm against the out-of-line implementation.
68  bool Is(const CPURegister& other) const;
69  bool Aliases(const CPURegister& other) const;
70 
71  bool IsZero() const;
72  bool IsSP() const;
73 
74  bool IsRegister() const;
75  bool IsFPRegister() const;
76 
 // Views of this register at another width/type.
77  Register X() const;
78  Register W() const;
79  FPRegister D() const;
80  FPRegister S() const;
81 
82  bool IsSameSizeAndType(const CPURegister& other) const;
83 
84  // V8 compatibility.
85  bool is(const CPURegister& other) const { return Is(other); }
86  bool is_valid() const { return IsValid(); }
87 
88  unsigned reg_code;
89  unsigned reg_size;
91 };
92 
93 
// Integer (W/X) register. Adds no data members to CPURegister so the two
// remain layout-compatible (see the STATIC_ASSERTs further down).
// NOTE(review): this extract is missing several original lines (96, 99, 102,
// 109, 116, 156-161, 166-168, 175, 181, 186, 210, 215): the Create() body,
// the default-constructor line, DCHECK tails, the gap-size/max-count
// computations, and the else-branches of the allocation-index ternaries.
94 struct Register : public CPURegister {
95  static Register Create(unsigned code, unsigned size) {
97  }
98 
100  reg_code = 0;
101  reg_size = 0;
103  }
104 
105  explicit Register(const CPURegister& r) {
106  reg_code = r.reg_code;
107  reg_size = r.reg_size;
108  reg_type = r.reg_type;
110  }
111 
112  Register(const Register& r) { // NOLINT(runtime/explicit)
113  reg_code = r.reg_code;
114  reg_size = r.reg_size;
115  reg_type = r.reg_type;
117  }
118 
119  bool IsValid() const {
120  DCHECK(IsRegister() || IsNone());
121  return IsValidRegister();
122  }
123 
124  static Register XRegFromCode(unsigned code);
125  static Register WRegFromCode(unsigned code);
126 
127  // Start of V8 compatibility section ---------------------
128  // These members are necessary for compilation.
129  // A few of them may be unused for now.
130 
131  static const int kNumRegisters = kNumberOfRegisters;
132  static int NumRegisters() { return kNumRegisters; }
133 
134  // We allow crankshaft to use the following registers:
135  // - x0 to x15
136  // - x18 to x24
137  // - x27 (also context)
138  //
139  // TODO(all): Register x25 is currently free and could be available for
140  // crankshaft, but we don't use it as we might use it as a per function
141  // literal pool pointer in the future.
142  //
143  // TODO(all): Consider storing cp in x25 to have only two ranges.
144  // We split allocatable registers in three ranges called
145  // - "low range"
146  // - "high range"
147  // - "context"
148  static const unsigned kAllocatableLowRangeBegin = 0;
149  static const unsigned kAllocatableLowRangeEnd = 15;
150  static const unsigned kAllocatableHighRangeBegin = 18;
151  static const unsigned kAllocatableHighRangeEnd = 24;
152  static const unsigned kAllocatableContext = 27;
153 
154  // Gap between low and high ranges.
155  static const int kAllocatableRangeGapSize =
157 
158  static const int kMaxNumAllocatableRegisters =
162 
163  // Return true if the register is one that crankshaft can allocate.
164  bool IsAllocatable() const {
165  return ((reg_code == kAllocatableContext) ||
169  }
170 
 // Maps an allocation index back to a register: the context register (cp)
 // is the last index; other indices map through the low then high ranges.
171  static Register FromAllocationIndex(unsigned index) {
172  DCHECK(index < static_cast<unsigned>(NumAllocatableRegisters()));
173  // cp is the last allocatable register.
174  if (index == (static_cast<unsigned>(NumAllocatableRegisters() - 1))) {
176  }
177 
178  // Handle low and high ranges.
179  return (index <= kAllocatableLowRangeEnd)
180  ? from_code(index)
182  }
183 
 // Index-to-name table: 0-15 -> x0-x15, 16-22 -> x18-x24, 23 -> x27.
184  static const char* AllocationIndexToString(int index) {
185  DCHECK((index >= 0) && (index < NumAllocatableRegisters()));
187  (kAllocatableLowRangeEnd == 15) &&
188  (kAllocatableHighRangeBegin == 18) &&
189  (kAllocatableHighRangeEnd == 24) &&
190  (kAllocatableContext == 27));
191  const char* const names[] = {
192  "x0", "x1", "x2", "x3", "x4",
193  "x5", "x6", "x7", "x8", "x9",
194  "x10", "x11", "x12", "x13", "x14",
195  "x15", "x18", "x19", "x20", "x21",
196  "x22", "x23", "x24", "x27",
197  };
198  return names[index];
199  }
200 
 // Inverse of FromAllocationIndex.
201  static int ToAllocationIndex(Register reg) {
202  DCHECK(reg.IsAllocatable());
203  unsigned code = reg.code();
204  if (code == kAllocatableContext) {
205  return NumAllocatableRegisters() - 1;
206  }
207 
208  return (code <= kAllocatableLowRangeEnd)
209  ? code
211  }
212 
213  static Register from_code(int code) {
214  // Always return an X register.
216  }
217 
218  // End of V8 compatibility section -----------------------
219 };
220 
221 
// Floating-point (S/D) register. Adds no data members to CPURegister so the
// two remain layout-compatible (see the STATIC_ASSERTs further down).
// NOTE(review): this extract is missing several original lines (225, 228,
// 231, 238, 245, 257, 272, 275-277, 280, 294, 299, 318, 323): constructor
// tails, DCHECK lines, the gap-size/max-count computations and the
// else-branches of the allocation-index ternaries.
222 struct FPRegister : public CPURegister {
223  static FPRegister Create(unsigned code, unsigned size) {
224  return FPRegister(
226  }
227 
229  reg_code = 0;
230  reg_size = 0;
232  }
233 
234  explicit FPRegister(const CPURegister& r) {
235  reg_code = r.reg_code;
236  reg_size = r.reg_size;
237  reg_type = r.reg_type;
239  }
240 
241  FPRegister(const FPRegister& r) { // NOLINT(runtime/explicit)
242  reg_code = r.reg_code;
243  reg_size = r.reg_size;
244  reg_type = r.reg_type;
246  }
247 
248  bool IsValid() const {
249  DCHECK(IsFPRegister() || IsNone());
250  return IsValidFPRegister();
251  }
252 
253  static FPRegister SRegFromCode(unsigned code);
254  static FPRegister DRegFromCode(unsigned code);
255 
256  // Start of V8 compatibility section ---------------------
258 
259  // Crankshaft can use all the FP registers except:
260  // - d15 which is used to keep the 0 double value
261  // - d30 which is used in crankshaft as a double scratch register
262  // - d31 which is used in the MacroAssembler as a double scratch register
263  static const unsigned kAllocatableLowRangeBegin = 0;
264  static const unsigned kAllocatableLowRangeEnd = 14;
265  static const unsigned kAllocatableHighRangeBegin = 16;
266  static const unsigned kAllocatableHighRangeEnd = 28;
267 
 // Mask with bits 0-14 and 16-28 set (d0-d14, d16-d28).
 // NOTE(review): the comment above lists d30/d31 as scratch, but this mask
 // also excludes d29, and crankshaft_fp_scratch is aliased to d29 later in
 // this file — the comment looks stale; confirm which registers are scratch.
268  static const RegList kAllocatableFPRegisters = 0x1fff7fff;
269 
270  // Gap between low and high ranges.
271  static const int kAllocatableRangeGapSize =
273 
274  static const int kMaxNumAllocatableRegisters =
278 
279  // TODO(turbofan): Proper float32 support.
281  return NumAllocatableRegisters();
282  }
283 
284  // Return true if the register is one that crankshaft can allocate.
285  bool IsAllocatable() const {
286  return (Bit() & kAllocatableFPRegisters) != 0;
287  }
288 
289  static FPRegister FromAllocationIndex(unsigned int index) {
290  DCHECK(index < static_cast<unsigned>(NumAllocatableRegisters()));
291 
292  return (index <= kAllocatableLowRangeEnd)
293  ? from_code(index)
295  }
296 
 // Index-to-name table: 0-14 -> d0-d14, 15-27 -> d16-d28.
297  static const char* AllocationIndexToString(int index) {
298  DCHECK((index >= 0) && (index < NumAllocatableRegisters()));
300  (kAllocatableLowRangeEnd == 14) &&
301  (kAllocatableHighRangeBegin == 16) &&
302  (kAllocatableHighRangeEnd == 28));
303  const char* const names[] = {
304  "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
305  "d8", "d9", "d10", "d11", "d12", "d13", "d14",
306  "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
307  "d24", "d25", "d26", "d27", "d28"
308  };
309  return names[index];
310  }
311 
 // Inverse of FromAllocationIndex.
312  static int ToAllocationIndex(FPRegister reg) {
313  DCHECK(reg.IsAllocatable());
314  unsigned code = reg.code();
315 
316  return (code <= kAllocatableLowRangeEnd)
317  ? code
319  }
320 
321  static FPRegister from_code(int code) {
322  // Always return a D register.
324  }
325  // End of V8 compatibility section -----------------------
326 };
327 
328 
// The derived register structs must add no data members: the macros below
// reinterpret_cast CPURegister statics to Register / FPRegister references.
329 STATIC_ASSERT(sizeof(CPURegister) == sizeof(Register));
330 STATIC_ASSERT(sizeof(CPURegister) == sizeof(FPRegister));
331 
332 
// Exactly one translation unit defines ARM64_DEFINE_REG_STATICS; there the
// macros define the register constants (as zero-initialization-safe
// CPURegister PODs viewed through the derived type). Everywhere else they
// only declare extern references.
// NOTE(review): the reinterpret_cast between CPURegister and a derived type
// is technically a strict-aliasing violation; it is sanctioned here by the
// layout STATIC_ASSERTs above — confirm this matches project convention.
333 #if defined(ARM64_DEFINE_REG_STATICS)
334 #define INITIALIZE_REGISTER(register_class, name, code, size, type) \
335  const CPURegister init_##register_class##_##name = {code, size, type}; \
336  const register_class& name = *reinterpret_cast<const register_class*>( \
337  &init_##register_class##_##name)
338 #define ALIAS_REGISTER(register_class, alias, name) \
339  const register_class& alias = *reinterpret_cast<const register_class*>( \
340  &init_##register_class##_##name)
341 #else
342 #define INITIALIZE_REGISTER(register_class, name, code, size, type) \
343  extern const register_class& name
344 #define ALIAS_REGISTER(register_class, alias, name) \
345  extern const register_class& alias
346 #endif  // defined(ARM64_DEFINE_REG_STATICS)
347 
// NOTE(review): this region of the extract is heavily gapped. Missing
// original lines include 351-353/356 (the NoReg/NoCPUReg/NoFPReg
// definitions), 363/376 (the REGISTER_CODE_LIST invocations that expand
// DEFINE_REGISTERS / DEFINE_FPREGISTERS), 366-369 (csp/wcsp definitions),
// 382-410 in part (most ALIAS_REGISTER lines: root, cp, jssp, fp_zero,
// the MacroAssembler scratches), and 415/436 (the first declaration line of
// AreConsecutive(?) and of AreSameSizeAndType).
348 // No*Reg is used to indicate an unused argument, or an error case. Note that
349 // these all compare equal (using the Is() method). The Register and FPRegister
350 // variants are provided for convenience.
354 
355 // v8 compatibility.
357 
// Defines w<N> and x<N> constants for every register code N.
358 #define DEFINE_REGISTERS(N) \
359  INITIALIZE_REGISTER(Register, w##N, N, \
360  kWRegSizeInBits, CPURegister::kRegister); \
361  INITIALIZE_REGISTER(Register, x##N, N, \
362  kXRegSizeInBits, CPURegister::kRegister);
364 #undef DEFINE_REGISTERS
365 
370 
// Defines s<N> and d<N> constants for every FP register code N.
371 #define DEFINE_FPREGISTERS(N) \
372  INITIALIZE_REGISTER(FPRegister, s##N, N, \
373  kSRegSizeInBits, CPURegister::kFPRegister); \
374  INITIALIZE_REGISTER(FPRegister, d##N, N, \
375  kDRegSizeInBits, CPURegister::kFPRegister);
377 #undef DEFINE_FPREGISTERS
378 
379 #undef INITIALIZE_REGISTER
380 
381 // Register aliases.
386 // Root register.
389 // Context pointer register.
391 // We use a register as a JS stack pointer to overcome the restriction on the
392 // architectural SP alignment.
393 // We chose x28 because it is contiguous with the other specific purpose
394 // registers.
402 
403 // Keeps the 0 double value.
405 // Crankshaft double scratch register.
406 ALIAS_REGISTER(FPRegister, crankshaft_fp_scratch, d29);
407 // MacroAssembler double scratch registers.
411 
412 #undef ALIAS_REGISTER
413 
414 
416  Register reg2 = NoReg,
417  Register reg3 = NoReg,
418  Register reg4 = NoReg);
419 
420 
421 // AreAliased returns true if any of the named registers overlap. Arguments set
422 // to NoReg are ignored. The system stack pointer may be specified.
423 bool AreAliased(const CPURegister& reg1,
424  const CPURegister& reg2,
425  const CPURegister& reg3 = NoReg,
426  const CPURegister& reg4 = NoReg,
427  const CPURegister& reg5 = NoReg,
428  const CPURegister& reg6 = NoReg,
429  const CPURegister& reg7 = NoReg,
430  const CPURegister& reg8 = NoReg);
431 
432 // AreSameSizeAndType returns true if all of the specified registers have the
433 // same size, and are of the same type. The system stack pointer may be
434 // specified. Arguments set to NoReg are ignored, as are any subsequent
435 // arguments. At least one argument (reg1) must be valid (not NoCPUReg).
437  const CPURegister& reg2,
438  const CPURegister& reg3 = NoCPUReg,
439  const CPURegister& reg4 = NoCPUReg,
440  const CPURegister& reg5 = NoCPUReg,
441  const CPURegister& reg6 = NoCPUReg,
442  const CPURegister& reg7 = NoCPUReg,
443  const CPURegister& reg8 = NoCPUReg);
444 
445 
// FP registers double as V8's DoubleRegister type on ARM64.
446 typedef FPRegister DoubleRegister;
448 
449 // -----------------------------------------------------------------------------
450 // Lists of registers.
// A set of same-type, same-size registers stored as a bit mask (one bit per
// register code), e.g. for bulk push/pop and callee-saved bookkeeping.
// NOTE(review): this extract is missing original lines 463 and 468 (the
// first lines of the (type, size, list) and (type, size, first, last)
// constructors), 471/473 (the DCHECK head lines), 481 (type() head line),
// 519-535 in part (RemoveCalleeSaved and the static GetCalleeSaved /
// GetCallerSaved / safepoint-list accessors), 556 (the popcount expression
// in Count()), 576/578 (the list_ and type_ data members), and 584-588
// (the case labels of the switch in IsValid()).
451 class CPURegList {
452  public:
453  explicit CPURegList(CPURegister reg1,
454  CPURegister reg2 = NoCPUReg,
455  CPURegister reg3 = NoCPUReg,
456  CPURegister reg4 = NoCPUReg)
457  : list_(reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit()),
458  size_(reg1.SizeInBits()), type_(reg1.type()) {
459  DCHECK(AreSameSizeAndType(reg1, reg2, reg3, reg4));
460  DCHECK(IsValid());
461  }
462 
464  : list_(list), size_(size), type_(type) {
465  DCHECK(IsValid());
466  }
467 
 // Builds the contiguous range [first_reg, last_reg].
469  unsigned first_reg, unsigned last_reg)
470  : size_(size), type_(type) {
472  (last_reg < kNumberOfRegisters)) ||
474  (last_reg < kNumberOfFPRegisters)));
475  DCHECK(last_reg >= first_reg);
 // NOTE(review): 1UL << (last_reg + 1) assumes unsigned long is 64-bit;
 // on an LP32/LLP64 target a shift of 32+ would be UB — confirm RegList
 // and the literal widths match.
476  list_ = (1UL << (last_reg + 1)) - 1;
477  list_ &= ~((1UL << first_reg) - 1);
478  DCHECK(IsValid());
479  }
480 
482  DCHECK(IsValid());
483  return type_;
484  }
485 
486  RegList list() const {
487  DCHECK(IsValid());
488  return list_;
489  }
490 
491  inline void set_list(RegList new_list) {
492  DCHECK(IsValid());
493  list_ = new_list;
494  }
495 
496  // Combine another CPURegList into this one. Registers that already exist in
497  // this list are left unchanged. The type and size of the registers in the
498  // 'other' list must match those in this list.
499  void Combine(const CPURegList& other);
500 
501  // Remove every register in the other CPURegList from this one. Registers that
502  // do not exist in this list are ignored. The type of the registers in the
503  // 'other' list must match those in this list.
504  void Remove(const CPURegList& other);
505 
506  // Variants of Combine and Remove which take CPURegisters.
507  void Combine(const CPURegister& other);
508  void Remove(const CPURegister& other1,
509  const CPURegister& other2 = NoCPUReg,
510  const CPURegister& other3 = NoCPUReg,
511  const CPURegister& other4 = NoCPUReg);
512 
513  // Variants of Combine and Remove which take a single register by its code;
514  // the type and size of the register is inferred from this list.
515  void Combine(int code);
516  void Remove(int code);
517 
518  // Remove all callee-saved registers from the list. This can be useful when
519  // preparing registers for an AAPCS64 function call, for example.
521 
524 
525  // AAPCS64 callee-saved registers.
528 
529  // AAPCS64 caller-saved registers. Note that this includes lr.
532 
533  // Registers saved as safepoints.
535 
536  bool IsEmpty() const {
537  DCHECK(IsValid());
538  return list_ == 0;
539  }
540 
 // True if any argument of this list's type has its bit set in list_.
 // Arguments of a different type (or NoCPUReg) are ignored.
541  bool IncludesAliasOf(const CPURegister& other1,
542  const CPURegister& other2 = NoCPUReg,
543  const CPURegister& other3 = NoCPUReg,
544  const CPURegister& other4 = NoCPUReg) const {
545  DCHECK(IsValid());
546  RegList list = 0;
547  if (!other1.IsNone() && (other1.type() == type_)) list |= other1.Bit();
548  if (!other2.IsNone() && (other2.type() == type_)) list |= other2.Bit();
549  if (!other3.IsNone() && (other3.type() == type_)) list |= other3.Bit();
550  if (!other4.IsNone() && (other4.type() == type_)) list |= other4.Bit();
551  return (list_ & list) != 0;
552  }
553 
554  int Count() const {
555  DCHECK(IsValid());
557  }
558 
559  unsigned RegisterSizeInBits() const {
560  DCHECK(IsValid());
561  return size_;
562  }
563 
564  unsigned RegisterSizeInBytes() const {
565  int size_in_bits = RegisterSizeInBits();
566  DCHECK((size_in_bits % kBitsPerByte) == 0);
567  return size_in_bits / kBitsPerByte;
568  }
569 
570  unsigned TotalSizeInBytes() const {
571  DCHECK(IsValid());
572  return RegisterSizeInBytes() * Count();
573  }
574 
575  private:
577  unsigned size_;
579 
 // A list is valid if its bits are a subset of the valid codes for its type.
 // NOTE(review): kValidRegisters sets one bit above the low 32 — presumably
 // the internal SP pseudo-register code; confirm against the implementation.
580  bool IsValid() const {
581  const RegList kValidRegisters = 0x8000000ffffffff;
582  const RegList kValidFPRegisters = 0x0000000ffffffff;
583  switch (type_) {
585  return (list_ & kValidRegisters) == list_;
587  return (list_ & kValidFPRegisters) == list_;
589  return list_ == 0;
590  default:
591  UNREACHABLE();
592  return false;
593  }
594  }
595 };
596 
597 
// Convenience names for the standard AAPCS64 register lists. These are
// macros (not constants) because they expand to calls on CPURegList statics.
598 // AAPCS64 callee-saved registers.
599 #define kCalleeSaved CPURegList::GetCalleeSaved()
600 #define kCalleeSavedFP CPURegList::GetCalleeSavedFP()
601 
602 
603 // AAPCS64 caller-saved registers. Note that this includes lr.
604 #define kCallerSaved CPURegList::GetCallerSaved()
605 #define kCallerSavedFP CPURegList::GetCallerSavedFP()
606 
607 // -----------------------------------------------------------------------------
608 // Immediates.
// A 64-bit immediate plus its relocation mode.
// NOTE(review): this extract is missing original lines 620 (the declaration
// following the template header at 619 — presumably an (value, rmode)
// constructor), 626 (a private helper declaration) and 629 (presumably the
// rmode_ data member implied by the rmode() accessor).
609 class Immediate {
610  public:
611  template<typename T>
612  inline explicit Immediate(Handle<T> handle);
613 
614  // This is allowed to be an implicit constructor because Immediate is
615  // a wrapper class that doesn't normally perform any type conversion.
616  template<typename T>
617  inline Immediate(T value);  // NOLINT(runtime/explicit)
618 
619  template<typename T>
621 
622  int64_t value() const { return value_; }
623  RelocInfo::Mode rmode() const { return rmode_; }
624 
625  private:
627 
628  int64_t value_;
630 };
631 
632 
633 // -----------------------------------------------------------------------------
634 // Operands.
// Mask covering the low kSmiShift bits.
// NOTE(review): the definition of kSmiShift (original line 635) is missing
// from this extract.
636 const uint64_t kSmiShiftMask = (1UL << kSmiShift) - 1;
637 
638 // Represents an operand in a machine instruction.
// Represents an operand in a machine instruction: either an immediate, a
// shifted register, or an extended register.
// NOTE(review): this extract is missing original lines 694-697 — the
// remaining private data members (presumably the immediate, register, shift
// and extend fields implied by the accessors above).
639 class Operand {
640  // TODO(all): If necessary, study more in details which methods
641  // TODO(all): should be inlined or not.
642  public:
643  // rm, {<shift> {#<shift_amount>}}
644  // where <shift> is one of {LSL, LSR, ASR, ROR}.
645  // <shift_amount> is uint6_t.
646  // This is allowed to be an implicit constructor because Operand is
647  // a wrapper class that doesn't normally perform any type conversion.
648  inline Operand(Register reg,
649  Shift shift = LSL,
650  unsigned shift_amount = 0);  // NOLINT(runtime/explicit)
651 
652  // rm, <extend> {#<shift_amount>}
653  // where <extend> is one of {UXTB, UXTH, UXTW, UXTX, SXTB, SXTH, SXTW, SXTX}.
654  // <shift_amount> is uint2_t.
655  inline Operand(Register reg,
656  Extend extend,
657  unsigned shift_amount = 0);
658 
659  template<typename T>
660  inline explicit Operand(Handle<T> handle);
661 
662  // Implicit constructor for all int types, ExternalReference, and Smi.
663  template<typename T>
664  inline Operand(T t);  // NOLINT(runtime/explicit)
665 
666  // Implicit constructor for int types.
667  template<typename T>
668  inline Operand(T t, RelocInfo::Mode rmode);
669 
 // Discriminators for the three operand forms.
670  inline bool IsImmediate() const;
671  inline bool IsShiftedRegister() const;
672  inline bool IsExtendedRegister() const;
673  inline bool IsZero() const;
674 
675  // This returns an LSL shift (<= 4) operand as an equivalent extend operand,
676  // which helps in the encoding of instructions that use the stack pointer.
677  inline Operand ToExtendedRegister() const;
678 
679  inline Immediate immediate() const;
680  inline int64_t ImmediateValue() const;
681  inline Register reg() const;
682  inline Shift shift() const;
683  inline Extend extend() const;
684  inline unsigned shift_amount() const;
685 
686  // Relocation information.
687  bool NeedsRelocation(const Assembler* assembler) const;
688 
689  // Helpers
690  inline static Operand UntagSmi(Register smi);
691  inline static Operand UntagSmiAndScale(Register smi, int scale);
692 
693  private:
698  unsigned shift_amount_;
699 };
700 
701 
702 // MemOperand represents a memory operand in a load or store instruction.
// MemOperand represents a memory operand in a load or store instruction:
// a base register plus an immediate or register offset, with an addressing
// mode and optional shift/extend of the register offset.
// NOTE(review): this extract is missing original lines 708, 710, 714 and 719
// (presumably the AddrMode parameters / offset registers of the four
// constructors), 743 (the declaration line naming the stp/ldp pair-check
// function whose trailing parameters appear below), and 748-749 / 751-753
// (the remaining private data members implied by the accessors).
703 class MemOperand {
704  public:
705  inline MemOperand();
706  inline explicit MemOperand(Register base,
707  int64_t offset = 0,
709  inline explicit MemOperand(Register base,
711  Shift shift = LSL,
712  unsigned shift_amount = 0);
713  inline explicit MemOperand(Register base,
715  Extend extend,
716  unsigned shift_amount = 0);
717  inline explicit MemOperand(Register base,
718  const Operand& offset,
720 
721  const Register& base() const { return base_; }
722  const Register& regoffset() const { return regoffset_; }
723  int64_t offset() const { return offset_; }
724  AddrMode addrmode() const { return addrmode_; }
725  Shift shift() const { return shift_; }
726  Extend extend() const { return extend_; }
727  unsigned shift_amount() const { return shift_amount_; }
728  inline bool IsImmediateOffset() const;
729  inline bool IsRegisterOffset() const;
730  inline bool IsPreIndex() const;
731  inline bool IsPostIndex() const;
732 
733  // For offset modes, return the offset as an Operand. This helper cannot
734  // handle indexed modes.
735  inline Operand OffsetAsOperand() const;
736 
737  enum PairResult {
738  kNotPair,  // Can't use a pair instruction.
739  kPairAB,  // Can use a pair instruction (operandA has lower address).
740  kPairBA  // Can use a pair instruction (operandB has lower address).
741  };
742  // Check if two MemOperand are consistent for stp/ldp use.
744  const MemOperand& operandB,
745  int access_size_log2 = kXRegSizeLog2);
746 
747  private:
750  int64_t offset_;
754  unsigned shift_amount_;
755 };
756 
757 
// Tracks pending 64-bit literal-pool entries for an Assembler and decides
// when/how the pool must be emitted. Identical values are shared between
// entries where relocation allows; others are kept unique.
// NOTE(review): this extract is missing original lines 763 (the tail of the
// constructor initializer list), 773 (the declaration under the
// "distance to first use" comment), 778 (the declaration under the
// "maximum size" comment), 789 (a private helper), 794 (presumably the
// assm_ member), 797 (presumably the first_use_ member) and 801
// (presumably the shared_entries_count member used by EntryCount()).
758 class ConstPool {
759  public:
760  explicit ConstPool(Assembler* assm)
761  : assm_(assm),
762  first_use_(-1),
764  void RecordEntry(intptr_t data, RelocInfo::Mode mode);
765  int EntryCount() const {
766  return shared_entries_count + unique_entries_.size();
767  }
768  bool IsEmpty() const {
769  return shared_entries_.empty() && unique_entries_.empty();
770  }
771  // Distance in bytes between the current pc and the first instruction
772  // using the pool. If there are no pending entries return kMaxInt.
774  // Offset after which instructions using the pool will be out of range.
775  int MaxPcOffset();
776  // Maximum size the constant pool can be with current entries. It always
777  // includes alignment padding and branch over.
779  // Size in bytes of the literal pool *if* it is emitted at the current
780  // pc. The size will include the branch over the pool if it was requested.
781  int SizeIfEmittedAtCurrentPc(bool require_jump);
782  // Emit the literal pool at the current pc with a branch over the pool if
783  // requested.
784  void Emit(bool require_jump);
785  // Discard any pending pool entries.
786  void Clear();
787 
788  private:
790  void EmitMarker();
791  void EmitGuard();
792  void EmitEntries();
793 
795  // Keep track of the first instruction requiring a constant pool entry
796  // since the previous constant pool was emitted.
798  // values, pc offset(s) of entries which can be shared.
799  std::multimap<uint64_t, int> shared_entries_;
800  // Number of distinct literals in shared entries.
802  // values, pc offset of entries which cannot be shared.
803  std::vector<std::pair<uint64_t, int> > unique_entries_;
804 };
805 
806 
807 // -----------------------------------------------------------------------------
808 // Assembler.
809 
810 class Assembler : public AssemblerBase {
811  public:
812  // Create an assembler. Instructions and relocation information are emitted
813  // into a buffer, with the instructions starting from the beginning and the
814  // relocation information starting from the end of the buffer. See CodeDesc
815  // for a detailed comment on the layout (globals.h).
816  //
817  // If the provided buffer is NULL, the assembler allocates and grows its own
818  // buffer, and buffer_size determines the initial buffer size. The buffer is
819  // owned by the assembler and deallocated upon destruction of the assembler.
820  //
821  // If the provided buffer is not NULL, the assembler uses the provided buffer
822  // for code generation and assumes its size to be buffer_size. If the buffer
823  // is too small, a fatal error occurs. No deallocation of the buffer is done
824  // upon destruction of the assembler.
825  Assembler(Isolate* arg_isolate, void* buffer, int buffer_size);
826 
827  virtual ~Assembler();
828 
829  virtual void AbortedCodeGeneration() {
830  constpool_.Clear();
831  }
832 
833  // System functions ---------------------------------------------------------
834  // Start generating code from the beginning of the buffer, discarding any code
835  // and data that has already been emitted into the buffer.
836  //
837  // In order to avoid any accidental transfer of state, Reset DCHECKs that the
838  // constant pool is not blocked.
839  void Reset();
840 
841  // GetCode emits any pending (non-emitted) code and fills the descriptor
842  // desc. GetCode() is idempotent; it returns the same result if no other
843  // Assembler functions are invoked in between GetCode() calls.
844  //
845  // The descriptor (desc) can be NULL. In that case, the code is finalized as
846  // usual, but the descriptor is not populated.
847  void GetCode(CodeDesc* desc);
848 
849  // Insert the smallest number of nop instructions
850  // possible to align the pc offset to a multiple
851  // of m. m must be a power of 2 (>= 4).
852  void Align(int m);
853 
854  inline void Unreachable();
855 
856  // Label --------------------------------------------------------------------
857  // Bind a label to the current pc. Note that labels can only be bound once,
858  // and if labels are linked to other instructions, they _must_ be bound
859  // before they go out of scope.
860  void bind(Label* label);
861 
862 
863  // RelocInfo and pools ------------------------------------------------------
864 
865  // Record relocation information for current pc_.
866  void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
867 
868  // Return the address in the constant pool of the code target address used by
869  // the branch/call instruction at pc.
871 
872  // Read/Modify the code target address in the branch/call instruction at pc.
873  inline static Address target_address_at(Address pc,
874  ConstantPoolArray* constant_pool);
875  inline static void set_target_address_at(Address pc,
876  ConstantPoolArray* constant_pool,
877  Address target,
878  ICacheFlushMode icache_flush_mode =
880  static inline Address target_address_at(Address pc, Code* code);
881  static inline void set_target_address_at(Address pc,
882  Code* code,
883  Address target,
884  ICacheFlushMode icache_flush_mode =
886 
887  // Return the code target address at a call site from the return address of
888  // that call in the instruction stream.
890 
891  // Given the address of the beginning of a call, return the address in the
892  // instruction stream that call will return from.
894 
895  // Return the code target address of the patch debug break slot
897 
898  // This sets the branch destination (which is in the constant pool on ARM).
899  // This is for calls and branches within generated code.
901  Address constant_pool_entry, Code* code, Address target);
902 
903  // All addresses in the constant pool are the same size as pointers.
904  static const int kSpecialTargetSize = kPointerSize;
905 
906  // The sizes of the call sequences emitted by MacroAssembler::Call.
907  // Wherever possible, use MacroAssembler::CallSize instead of these constants,
908  // as it will choose the correct value for a given relocation mode.
909  //
910  // Without relocation:
911  // movz temp, #(target & 0x000000000000ffff)
912  // movk temp, #(target & 0x00000000ffff0000)
913  // movk temp, #(target & 0x0000ffff00000000)
914  // blr temp
915  //
916  // With relocation:
917  // ldr temp, =target
918  // blr temp
921 
922  // Size of the generated code in bytes
923  uint64_t SizeOfGeneratedCode() const {
924  DCHECK((pc_ >= buffer_) && (pc_ < (buffer_ + buffer_size_)));
925  return pc_ - buffer_;
926  }
927 
928  // Return the code size generated from label to the current position.
929  uint64_t SizeOfCodeGeneratedSince(const Label* label) {
930  DCHECK(label->is_bound());
931  DCHECK(pc_offset() >= label->pos());
933  return pc_offset() - label->pos();
934  }
935 
936  // Check the size of the code generated since the given label. This function
937  // is used primarily to work around comparisons between signed and unsigned
938  // quantities, since V8 uses both.
939  // TODO(jbramley): Work out what sign to use for these things and if possible,
940  // change things to be consistent.
941  void AssertSizeOfCodeGeneratedSince(const Label* label, ptrdiff_t size) {
942  DCHECK(size >= 0);
943  DCHECK(static_cast<uint64_t>(size) == SizeOfCodeGeneratedSince(label));
944  }
945 
946  // Return the number of instructions generated from label to the
947  // current position.
948  int InstructionsGeneratedSince(const Label* label) {
950  }
951 
952  // Number of instructions generated for the return sequence in
953  // FullCodeGenerator::EmitReturnSequence.
954  static const int kJSRetSequenceInstructions = 7;
955  // Distance between start of patched return sequence and the emitted address
956  // to jump to.
957  static const int kPatchReturnSequenceAddressOffset = 0;
958  static const int kPatchDebugBreakSlotAddressOffset = 0;
959 
960  // Number of instructions necessary to be able to later patch it to a call.
961  // See DebugCodegen::GenerateSlot() and
962  // BreakLocationIterator::SetDebugBreakAtSlot().
963  static const int kDebugBreakSlotInstructions = 4;
964  static const int kDebugBreakSlotLength =
966 
967  static const int kPatchDebugBreakSlotReturnOffset = 2 * kInstructionSize;
968 
969  // Prevent contant pool emission until EndBlockConstPool is called.
970  // Call to this function can be nested but must be followed by an equal
971  // number of call to EndBlockConstpool.
973 
974  // Resume constant pool emission. Need to be called as many time as
975  // StartBlockConstPool to have an effect.
977 
978  bool is_const_pool_blocked() const;
979  static bool IsConstantPoolAt(Instruction* instr);
980  static int ConstantPoolSizeAt(Instruction* instr);
981  // See Assembler::CheckConstPool for more info.
983 
984  // Prevent veneer pool emission until EndBlockVeneerPool is called.
985  // Call to this function can be nested but must be followed by an equal
986  // number of call to EndBlockConstpool.
988 
989  // Resume constant pool emission. Need to be called as many time as
990  // StartBlockVeneerPool to have an effect.
992 
993  bool is_veneer_pool_blocked() const {
994  return veneer_pool_blocked_nesting_ > 0;
995  }
996 
997  // Block/resume emission of constant pools and veneer pools.
1001  }
1002  void EndBlockPools() {
1005  }
1006 
1007  // Debugging ----------------------------------------------------------------
1009  void RecordComment(const char* msg);
1010  int buffer_space() const;
1011 
1012  // Mark address of the ExitJSFrame code.
1014 
1015  // Mark address of a debug break slot.
1017 
1018  // Record the emission of a constant pool.
1019  //
1020  // The emission of constant and veneer pools depends on the size of the code
1021  // generated and the number of RelocInfo recorded.
1022  // The Debug mechanism needs to map code offsets between two versions of a
1023  // function, compiled with and without debugger support (see for example
1024  // Debug::PrepareForBreakPoints()).
1025  // Compiling functions with debugger support generates additional code
1026  // (DebugCodegen::GenerateSlot()). This may affect the emission of the pools
1027  // and cause the version of the code with debugger support to have pools
1028  // generated in different places.
1029  // Recording the position and size of emitted pools allows to correctly
1030  // compute the offset mappings between the different versions of a function in
1031  // all situations.
1032  //
1033  // The parameter indicates the size of the pool (in bytes), including
1034  // the marker and branch over the data.
1036 
1037 
  // Instruction set functions ------------------------------------------------

  // Branch / Jump instructions.
  // For branches, offsets are scaled: they are in instructions, not in bytes.

  // Branch to register.
  void br(const Register& xn);

  // Branch-link to register.
  void blr(const Register& xn);

  // Branch to register with return hint.
  void ret(const Register& xn = lr);

  // Unconditional branch to label.
  void b(Label* label);

  // Conditional branch to label.
  void b(Label* label, Condition cond);

  // Unconditional branch to PC offset.
  void b(int imm26);

  // Conditional branch to PC offset.
  void b(int imm19, Condition cond);

  // Branch-link to label / pc offset.
  void bl(Label* label);
  void bl(int imm26);

  // Compare and branch to label / pc offset if zero.
  void cbz(const Register& rt, Label* label);
  void cbz(const Register& rt, int imm19);

  // Compare and branch to label / pc offset if not zero.
  void cbnz(const Register& rt, Label* label);
  void cbnz(const Register& rt, int imm19);

  // Test bit and branch to label / pc offset if zero.
  void tbz(const Register& rt, unsigned bit_pos, Label* label);
  void tbz(const Register& rt, unsigned bit_pos, int imm14);

  // Test bit and branch to label / pc offset if not zero.
  void tbnz(const Register& rt, unsigned bit_pos, Label* label);
  void tbnz(const Register& rt, unsigned bit_pos, int imm14);

  // Address calculation instructions.
  // Calculate a PC-relative address. Unlike for branches, the offset in adr
  // is unscaled (i.e. the result can be unaligned).
  void adr(const Register& rd, Label* label);
  void adr(const Register& rd, int imm21);
1088 
  // Data Processing instructions.
  // Add.
  void add(const Register& rd,
           const Register& rn,
           const Operand& operand);

  // Add and update status flags.
  void adds(const Register& rd,
            const Register& rn,
            const Operand& operand);

  // Compare negative.
  void cmn(const Register& rn, const Operand& operand);

  // Subtract.
  void sub(const Register& rd,
           const Register& rn,
           const Operand& operand);

  // Subtract and update status flags.
  void subs(const Register& rd,
            const Register& rn,
            const Operand& operand);

  // Compare.
  void cmp(const Register& rn, const Operand& operand);

  // Negate.
  void neg(const Register& rd,
           const Operand& operand);

  // Negate and update status flags.
  void negs(const Register& rd,
            const Operand& operand);

  // Add with carry bit.
  void adc(const Register& rd,
           const Register& rn,
           const Operand& operand);

  // Add with carry bit and update status flags.
  void adcs(const Register& rd,
            const Register& rn,
            const Operand& operand);

  // Subtract with carry bit.
  void sbc(const Register& rd,
           const Register& rn,
           const Operand& operand);

  // Subtract with carry bit and update status flags.
  void sbcs(const Register& rd,
            const Register& rn,
            const Operand& operand);

  // Negate with carry bit.
  void ngc(const Register& rd,
           const Operand& operand);

  // Negate with carry bit and update status flags.
  void ngcs(const Register& rd,
            const Operand& operand);

  // Logical instructions.
  // Bitwise and (A & B).
  void and_(const Register& rd,
            const Register& rn,
            const Operand& operand);

  // Bitwise and (A & B) and update status flags.
  void ands(const Register& rd,
            const Register& rn,
            const Operand& operand);

  // Bit test, and set flags.
  void tst(const Register& rn, const Operand& operand);

  // Bit clear (A & ~B).
  void bic(const Register& rd,
           const Register& rn,
           const Operand& operand);

  // Bit clear (A & ~B) and update status flags.
  void bics(const Register& rd,
            const Register& rn,
            const Operand& operand);

  // Bitwise or (A | B).
  void orr(const Register& rd, const Register& rn, const Operand& operand);

  // Bitwise or with inverted operand (A | ~B).
  void orn(const Register& rd, const Register& rn, const Operand& operand);

  // Bitwise eor/xor (A ^ B).
  void eor(const Register& rd, const Register& rn, const Operand& operand);

  // Bitwise exclusive-or with inverted operand (A ^ ~B).
  void eon(const Register& rd, const Register& rn, const Operand& operand);

  // Logical shift left variable.
  void lslv(const Register& rd, const Register& rn, const Register& rm);

  // Logical shift right variable.
  void lsrv(const Register& rd, const Register& rn, const Register& rm);

  // Arithmetic shift right variable.
  void asrv(const Register& rd, const Register& rn, const Register& rm);

  // Rotate right variable.
  void rorv(const Register& rd, const Register& rn, const Register& rm);

  // Bitfield instructions.
  // Bitfield move.
  void bfm(const Register& rd,
           const Register& rn,
           unsigned immr,
           unsigned imms);

  // Signed bitfield move.
  void sbfm(const Register& rd,
            const Register& rn,
            unsigned immr,
            unsigned imms);

  // Unsigned bitfield move.
  void ubfm(const Register& rd,
            const Register& rn,
            unsigned immr,
            unsigned imms);

1219  // Bfm aliases.
1220  // Bitfield insert.
1221  void bfi(const Register& rd,
1222  const Register& rn,
1223  unsigned lsb,
1224  unsigned width) {
1225  DCHECK(width >= 1);
1226  DCHECK(lsb + width <= rn.SizeInBits());
1227  bfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1);
1228  }
1229 
1230  // Bitfield extract and insert low.
1231  void bfxil(const Register& rd,
1232  const Register& rn,
1233  unsigned lsb,
1234  unsigned width) {
1235  DCHECK(width >= 1);
1236  DCHECK(lsb + width <= rn.SizeInBits());
1237  bfm(rd, rn, lsb, lsb + width - 1);
1238  }
1239 
1240  // Sbfm aliases.
1241  // Arithmetic shift right.
1242  void asr(const Register& rd, const Register& rn, unsigned shift) {
1243  DCHECK(shift < rd.SizeInBits());
1244  sbfm(rd, rn, shift, rd.SizeInBits() - 1);
1245  }
1246 
1247  // Signed bitfield insert in zero.
1248  void sbfiz(const Register& rd,
1249  const Register& rn,
1250  unsigned lsb,
1251  unsigned width) {
1252  DCHECK(width >= 1);
1253  DCHECK(lsb + width <= rn.SizeInBits());
1254  sbfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1);
1255  }
1256 
1257  // Signed bitfield extract.
1258  void sbfx(const Register& rd,
1259  const Register& rn,
1260  unsigned lsb,
1261  unsigned width) {
1262  DCHECK(width >= 1);
1263  DCHECK(lsb + width <= rn.SizeInBits());
1264  sbfm(rd, rn, lsb, lsb + width - 1);
1265  }
1266 
  // Signed extend byte (low 8 bits of rn); alias of sbfm.
  void sxtb(const Register& rd, const Register& rn) {
    sbfm(rd, rn, 0, 7);
  }

  // Signed extend halfword (low 16 bits of rn); alias of sbfm.
  void sxth(const Register& rd, const Register& rn) {
    sbfm(rd, rn, 0, 15);
  }

  // Signed extend word (low 32 bits of rn); alias of sbfm.
  void sxtw(const Register& rd, const Register& rn) {
    sbfm(rd, rn, 0, 31);
  }
1281 
1282  // Ubfm aliases.
1283  // Logical shift left.
1284  void lsl(const Register& rd, const Register& rn, unsigned shift) {
1285  unsigned reg_size = rd.SizeInBits();
1286  DCHECK(shift < reg_size);
1287  ubfm(rd, rn, (reg_size - shift) % reg_size, reg_size - shift - 1);
1288  }
1289 
1290  // Logical shift right.
1291  void lsr(const Register& rd, const Register& rn, unsigned shift) {
1292  DCHECK(shift < rd.SizeInBits());
1293  ubfm(rd, rn, shift, rd.SizeInBits() - 1);
1294  }
1295 
1296  // Unsigned bitfield insert in zero.
1297  void ubfiz(const Register& rd,
1298  const Register& rn,
1299  unsigned lsb,
1300  unsigned width) {
1301  DCHECK(width >= 1);
1302  DCHECK(lsb + width <= rn.SizeInBits());
1303  ubfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1);
1304  }
1305 
1306  // Unsigned bitfield extract.
1307  void ubfx(const Register& rd,
1308  const Register& rn,
1309  unsigned lsb,
1310  unsigned width) {
1311  DCHECK(width >= 1);
1312  DCHECK(lsb + width <= rn.SizeInBits());
1313  ubfm(rd, rn, lsb, lsb + width - 1);
1314  }
1315 
  // Unsigned extend byte (low 8 bits of rn); alias of ubfm.
  void uxtb(const Register& rd, const Register& rn) {
    ubfm(rd, rn, 0, 7);
  }

  // Unsigned extend halfword (low 16 bits of rn); alias of ubfm.
  void uxth(const Register& rd, const Register& rn) {
    ubfm(rd, rn, 0, 15);
  }

  // Unsigned extend word (low 32 bits of rn); alias of ubfm.
  void uxtw(const Register& rd, const Register& rn) {
    ubfm(rd, rn, 0, 31);
  }
1330 
  // Extract.
  void extr(const Register& rd,
            const Register& rn,
            const Register& rm,
            unsigned lsb);

  // Conditional select: rd = cond ? rn : rm.
  void csel(const Register& rd,
            const Register& rn,
            const Register& rm,
            Condition cond);

  // Conditional select increment: rd = cond ? rn : rm + 1.
  void csinc(const Register& rd,
             const Register& rn,
             const Register& rm,
             Condition cond);

  // Conditional select inversion: rd = cond ? rn : ~rm.
  void csinv(const Register& rd,
             const Register& rn,
             const Register& rm,
             Condition cond);

  // Conditional select negation: rd = cond ? rn : -rm.
  void csneg(const Register& rd,
             const Register& rn,
             const Register& rm,
             Condition cond);

  // Conditional set: rd = cond ? 1 : 0.
  void cset(const Register& rd, Condition cond);

  // Conditional set minus: rd = cond ? -1 : 0.
  void csetm(const Register& rd, Condition cond);

  // Conditional increment: rd = cond ? rn + 1 : rn.
  void cinc(const Register& rd, const Register& rn, Condition cond);

  // Conditional invert: rd = cond ? ~rn : rn.
  void cinv(const Register& rd, const Register& rn, Condition cond);

  // Conditional negate: rd = cond ? -rn : rn.
  void cneg(const Register& rd, const Register& rn, Condition cond);

  // Extr aliases.
  // Rotate right: extr with rs as both source operands.
  void ror(const Register& rd, const Register& rs, unsigned shift) {
    extr(rd, rs, rs, shift);
  }

  // Conditional comparison.
  // Conditional compare negative.
  void ccmn(const Register& rn,
            const Operand& operand,
            StatusFlags nzcv,
            Condition cond);

  // Conditional compare.
  void ccmp(const Register& rn,
            const Operand& operand,
            StatusFlags nzcv,
            Condition cond);

  // Multiplication.
  // 32 x 32 -> 32-bit and 64 x 64 -> 64-bit multiply.
  void mul(const Register& rd, const Register& rn, const Register& rm);

  // 32 + 32 x 32 -> 32-bit and 64 + 64 x 64 -> 64-bit multiply accumulate.
  void madd(const Register& rd,
            const Register& rn,
            const Register& rm,
            const Register& ra);

  // -(32 x 32) -> 32-bit and -(64 x 64) -> 64-bit multiply.
  void mneg(const Register& rd, const Register& rn, const Register& rm);

  // 32 - 32 x 32 -> 32-bit and 64 - 64 x 64 -> 64-bit multiply subtract.
  void msub(const Register& rd,
            const Register& rn,
            const Register& rm,
            const Register& ra);

  // Signed 32 x 32 -> 64-bit multiply.
  void smull(const Register& rd, const Register& rn, const Register& rm);

  // Xd = bits<127:64> of Xn * Xm.
  void smulh(const Register& rd, const Register& rn, const Register& rm);

  // Signed 32 x 32 -> 64-bit multiply and accumulate.
  void smaddl(const Register& rd,
              const Register& rn,
              const Register& rm,
              const Register& ra);

  // Unsigned 32 x 32 -> 64-bit multiply and accumulate.
  void umaddl(const Register& rd,
              const Register& rn,
              const Register& rm,
              const Register& ra);

  // Signed 32 x 32 -> 64-bit multiply and subtract.
  void smsubl(const Register& rd,
              const Register& rn,
              const Register& rm,
              const Register& ra);

  // Unsigned 32 x 32 -> 64-bit multiply and subtract.
  void umsubl(const Register& rd,
              const Register& rn,
              const Register& rm,
              const Register& ra);

  // Signed integer divide.
  void sdiv(const Register& rd, const Register& rn, const Register& rm);

  // Unsigned integer divide.
  void udiv(const Register& rd, const Register& rn, const Register& rm);

  // Bit count, bit reverse and endian reverse.
  void rbit(const Register& rd, const Register& rn);
  void rev16(const Register& rd, const Register& rn);
  void rev32(const Register& rd, const Register& rn);
  void rev(const Register& rd, const Register& rn);
  void clz(const Register& rd, const Register& rn);
  void cls(const Register& rd, const Register& rn);
1456 
  // Memory instructions.

  // Load integer or FP register.
  void ldr(const CPURegister& rt, const MemOperand& src);

  // Store integer or FP register.
  void str(const CPURegister& rt, const MemOperand& dst);

  // Load word with sign extension.
  void ldrsw(const Register& rt, const MemOperand& src);

  // Load byte.
  void ldrb(const Register& rt, const MemOperand& src);

  // Store byte.
  void strb(const Register& rt, const MemOperand& dst);

  // Load byte with sign extension.
  void ldrsb(const Register& rt, const MemOperand& src);

  // Load half-word.
  void ldrh(const Register& rt, const MemOperand& src);

  // Store half-word.
  void strh(const Register& rt, const MemOperand& dst);

  // Load half-word with sign extension.
  void ldrsh(const Register& rt, const MemOperand& src);

  // Load integer or FP register pair.
  void ldp(const CPURegister& rt, const CPURegister& rt2,
           const MemOperand& src);

  // Store integer or FP register pair.
  void stp(const CPURegister& rt, const CPURegister& rt2,
           const MemOperand& dst);

  // Load word pair with sign extension.
  void ldpsw(const Register& rt, const Register& rt2, const MemOperand& src);

  // Load integer or FP register pair, non-temporal.
  void ldnp(const CPURegister& rt, const CPURegister& rt2,
            const MemOperand& src);

  // Store integer or FP register pair, non-temporal.
  void stnp(const CPURegister& rt, const CPURegister& rt2,
            const MemOperand& dst);

  // Load literal to register from a pc relative address.
  void ldr_pcrel(const CPURegister& rt, int imm19);

  // Load literal to register.
  void ldr(const CPURegister& rt, const Immediate& imm);

  // Move instructions. The default shift of -1 indicates that the move
  // instruction will calculate an appropriate 16-bit immediate and left shift
  // that is equal to the 64-bit immediate argument. If an explicit left shift
  // is specified (0, 16, 32 or 48), the immediate must be a 16-bit value.
  //
  // For movk, an explicit shift can be used to indicate which half word should
  // be overwritten, e.g. movk(x0, 0, 0) will overwrite the least-significant
  // half word with zero, whereas movk(x0, 0, 48) will overwrite the
  // most-significant.

  // Move and keep.
  void movk(const Register& rd, uint64_t imm, int shift = -1) {
    MoveWide(rd, imm, shift, MOVK);
  }

  // Move with non-zero.
  void movn(const Register& rd, uint64_t imm, int shift = -1) {
    MoveWide(rd, imm, shift, MOVN);
  }

  // Move with zero.
  void movz(const Register& rd, uint64_t imm, int shift = -1) {
    MoveWide(rd, imm, shift, MOVZ);
  }
1535 
  // Misc instructions.
  // Monitor debug-mode breakpoint.
  void brk(int code);

  // Halting debug-mode breakpoint.
  void hlt(int code);

  // Move register to register.
  void mov(const Register& rd, const Register& rn);

  // Move NOT(operand) to register.
  void mvn(const Register& rd, const Operand& operand);

  // System instructions.
  // Move to register from system register.
  void mrs(const Register& rt, SystemRegister sysreg);

  // Move from register to system register.
  void msr(SystemRegister sysreg, const Register& rt);

  // System hint.
  void hint(SystemHint code);

  // Data memory barrier.
  void dmb(BarrierDomain domain, BarrierType type);

  // Data synchronization barrier.
  void dsb(BarrierDomain domain, BarrierType type);

  // Instruction synchronization barrier.
  void isb();

  // Alias for system instructions: no-op, encoded as a NOP hint.
  void nop() { hint(NOP); }
1570 
1571  // Different nop operations are used by the code generator to detect certain
1572  // states of the generated code.
1579  };
1580 
1582  DCHECK((FIRST_NOP_MARKER <= n) && (n <= LAST_NOP_MARKER));
1584  }
1585 
  // FP instructions.
  // Move immediate to FP register.
  void fmov(FPRegister fd, double imm);
  void fmov(FPRegister fd, float imm);

  // Move FP register to register.
  void fmov(Register rd, FPRegister fn);

  // Move register to FP register.
  void fmov(FPRegister fd, Register rn);

  // Move FP register to FP register.

  // FP add.
  void fadd(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);

  // FP subtract.
  void fsub(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);

  // FP multiply.
  void fmul(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);

  // FP fused multiply and add.
  void fmadd(const FPRegister& fd,
             const FPRegister& fn,
             const FPRegister& fm,
             const FPRegister& fa);

  // FP fused multiply and subtract.
  void fmsub(const FPRegister& fd,
             const FPRegister& fn,
             const FPRegister& fm,
             const FPRegister& fa);

  // FP fused multiply, add and negate.
  void fnmadd(const FPRegister& fd,
              const FPRegister& fn,
              const FPRegister& fm,
              const FPRegister& fa);

  // FP fused multiply, subtract and negate.
  void fnmsub(const FPRegister& fd,
              const FPRegister& fn,
              const FPRegister& fm,
              const FPRegister& fa);

  // FP divide.
  void fdiv(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);

  // FP maximum.
  void fmax(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);

  // FP minimum.
  void fmin(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);

  // FP maximum number (fmaxnm).
  void fmaxnm(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);

  // FP minimum number (fminnm).
  void fminnm(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);

  // FP absolute.
  void fabs(const FPRegister& fd, const FPRegister& fn);

  // FP negate.
  void fneg(const FPRegister& fd, const FPRegister& fn);

  // FP square root.
  void fsqrt(const FPRegister& fd, const FPRegister& fn);

  // FP round to integer (nearest with ties to away).
  void frinta(const FPRegister& fd, const FPRegister& fn);

  // FP round to integer (toward minus infinity).
  void frintm(const FPRegister& fd, const FPRegister& fn);

  // FP round to integer (nearest with ties to even).
  void frintn(const FPRegister& fd, const FPRegister& fn);

  // FP round to integer (towards zero).
  void frintz(const FPRegister& fd, const FPRegister& fn);

  // FP compare registers.
  void fcmp(const FPRegister& fn, const FPRegister& fm);

  // FP compare immediate.
  void fcmp(const FPRegister& fn, double value);

  // FP conditional compare.
  void fccmp(const FPRegister& fn,
             const FPRegister& fm,
             StatusFlags nzcv,
             Condition cond);

  // FP conditional select.
  void fcsel(const FPRegister& fd,
             const FPRegister& fn,
             const FPRegister& fm,
             Condition cond);

  // Common FP convert function.
  void FPConvertToInt(const Register& rd,
                      const FPRegister& fn,
                      FPIntegerConvertOp op);

  // FP convert between single and double precision.
  void fcvt(const FPRegister& fd, const FPRegister& fn);

  // Convert FP to unsigned integer (nearest with ties to away).
  void fcvtau(const Register& rd, const FPRegister& fn);

  // Convert FP to signed integer (nearest with ties to away).
  void fcvtas(const Register& rd, const FPRegister& fn);

  // Convert FP to unsigned integer (round towards -infinity).
  void fcvtmu(const Register& rd, const FPRegister& fn);

  // Convert FP to signed integer (round towards -infinity).
  void fcvtms(const Register& rd, const FPRegister& fn);

  // Convert FP to unsigned integer (nearest with ties to even).
  void fcvtnu(const Register& rd, const FPRegister& fn);

  // Convert FP to signed integer (nearest with ties to even).
  void fcvtns(const Register& rd, const FPRegister& fn);

  // Convert FP to unsigned integer (round towards zero).
  void fcvtzu(const Register& rd, const FPRegister& fn);

  // Convert FP to signed integer (round towards zero).
  void fcvtzs(const Register& rd, const FPRegister& fn);

  // Convert signed integer or fixed point to FP.
  void scvtf(const FPRegister& fd, const Register& rn, unsigned fbits = 0);

  // Convert unsigned integer or fixed point to FP.
  void ucvtf(const FPRegister& fd, const Register& rn, unsigned fbits = 0);
1724 
  // Instruction functions used only for test, debug, and patching.
  // Emit raw instructions in the instruction stream.
  void dci(Instr raw_inst) { Emit(raw_inst); }

  // Emit 8 bits of data in the instruction stream.
  void dc8(uint8_t data) { EmitData(&data, sizeof(data)); }

  // Emit 32 bits of data in the instruction stream.
  void dc32(uint32_t data) { EmitData(&data, sizeof(data)); }

  // Emit 64 bits of data in the instruction stream.
  void dc64(uint64_t data) { EmitData(&data, sizeof(data)); }

  // Copy a string into the instruction stream, including the terminating NULL
  // character. The instruction pointer (pc_) is then aligned correctly for
  // subsequent instructions.
  void EmitStringData(const char* string);

  // Pseudo-instructions ------------------------------------------------------

  // Parameters are described in arm64/instructions-arm64.h.
  void debug(const char* message, uint32_t code, Instr params = BREAK);

  // Required by V8. These simply forward to the dc32/dc8 emitters above.
  void dd(uint32_t data) { dc32(data); }
  void db(uint8_t data) { dc8(data); }
1751 
  // Code generation helpers --------------------------------------------------

  // True if the constant pool currently holds no pending entries.
  bool IsConstPoolEmpty() const { return constpool_.IsEmpty(); }

  // Current emission position, viewed as an Instruction pointer.
  Instruction* pc() const { return Instruction::Cast(pc_); }

  // Instruction at the given byte offset from the start of the buffer.
  Instruction* InstructionAt(int offset) const {
    return reinterpret_cast<Instruction*>(buffer_ + offset);
  }

  // Byte offset of |instr| from the start of the buffer.
  ptrdiff_t InstructionOffset(Instruction* instr) const {
    return reinterpret_cast<byte*>(instr) - buffer_;
  }
1765 
  // Register encoding.
  // Each of these shifts the register code into the corresponding instruction
  // field. The stack pointer's internal code is disallowed here.
  static Instr Rd(CPURegister rd) {
    DCHECK(rd.code() != kSPRegInternalCode);
    return rd.code() << Rd_offset;
  }

  static Instr Rn(CPURegister rn) {
    DCHECK(rn.code() != kSPRegInternalCode);
    return rn.code() << Rn_offset;
  }

  static Instr Rm(CPURegister rm) {
    DCHECK(rm.code() != kSPRegInternalCode);
    return rm.code() << Rm_offset;
  }

  static Instr Ra(CPURegister ra) {
    DCHECK(ra.code() != kSPRegInternalCode);
    return ra.code() << Ra_offset;
  }

  static Instr Rt(CPURegister rt) {
    DCHECK(rt.code() != kSPRegInternalCode);
    return rt.code() << Rt_offset;
  }

  static Instr Rt2(CPURegister rt2) {
    DCHECK(rt2.code() != kSPRegInternalCode);
    return rt2.code() << Rt2_offset;
  }

  // These encoding functions allow the stack pointer to be encoded, and
  // disallow the zero register.
  static Instr RdSP(Register rd) {
    DCHECK(!rd.IsZero());
    return (rd.code() & kRegCodeMask) << Rd_offset;
  }

  static Instr RnSP(Register rn) {
    DCHECK(!rn.IsZero());
    return (rn.code() & kRegCodeMask) << Rn_offset;
  }
1808 
  // Flags encoding.
  inline static Instr Flags(FlagsUpdate S);
  inline static Instr Cond(Condition cond);

  // PC-relative address encoding.
  inline static Instr ImmPCRelAddress(int imm21);

  // Branch encoding.
  inline static Instr ImmUncondBranch(int imm26);
  inline static Instr ImmCondBranch(int imm19);
  inline static Instr ImmCmpBranch(int imm19);
  inline static Instr ImmTestBranch(int imm14);
  inline static Instr ImmTestBranchBit(unsigned bit_pos);

  // Data Processing encoding.
  inline static Instr SF(Register rd);
  inline static Instr ImmAddSub(int64_t imm);
  inline static Instr ImmS(unsigned imms, unsigned reg_size);
  inline static Instr ImmR(unsigned immr, unsigned reg_size);
  inline static Instr ImmSetBits(unsigned imms, unsigned reg_size);
  inline static Instr ImmRotate(unsigned immr, unsigned reg_size);
  inline static Instr ImmLLiteral(int imm19);
  inline static Instr BitN(unsigned bitn, unsigned reg_size);
  inline static Instr ShiftDP(Shift shift);
  inline static Instr ImmDPShift(unsigned amount);
  inline static Instr ExtendMode(Extend extend);
  inline static Instr ImmExtendShift(unsigned left_shift);
  inline static Instr ImmCondCmp(unsigned imm);
  inline static Instr Nzcv(StatusFlags nzcv);

  // Immediate-encodability predicates.
  static bool IsImmAddSub(int64_t immediate);
  static bool IsImmLogical(uint64_t value,
                           unsigned width,
                           unsigned* n,
                           unsigned* imm_s,
                           unsigned* imm_r);

  // MemOperand offset encoding.
  inline static Instr ImmLSUnsigned(int imm12);
  inline static Instr ImmLS(int imm9);
  inline static Instr ImmLSPair(int imm7, LSDataSize size);
  inline static Instr ImmShiftLS(unsigned shift_amount);
  inline static Instr ImmException(int imm16);
  inline static Instr ImmSystemRegister(int imm15);
  inline static Instr ImmHint(int imm7);
  inline static Instr ImmBarrierDomain(int imm2);
  inline static Instr ImmBarrierType(int imm2);
  inline static LSDataSize CalcLSDataSize(LoadStoreOp op);

  static bool IsImmLSUnscaled(int64_t offset);
  static bool IsImmLSScaled(int64_t offset, LSDataSize size);

  // Move immediates encoding.
  inline static Instr ImmMoveWide(uint64_t imm);
  inline static Instr ShiftMoveWide(int64_t shift);

  // FP Immediates.
  static Instr ImmFP32(float imm);
  static Instr ImmFP64(double imm);
  inline static Instr FPScale(unsigned scale);

  // FP register type.
  inline static Instr FPType(FPRegister fd);
1872 
1873  // Class for scoping postponing the constant pool generation.
1874  class BlockConstPoolScope {
1875  public:
1876  explicit BlockConstPoolScope(Assembler* assem) : assem_(assem) {
1878  }
1881  }
1882 
1883  private:
1884  Assembler* assem_;
1885 
1887  };
1888 
  // Check whether it is time to emit a constant pool.
  void CheckConstPool(bool force_emit, bool require_jump);
1891 
1892  // Allocate a constant pool of the correct size for the generated code.
1894 
1895  // Generate the constant pool for the generated code.
1897 
  // Returns true if we should emit a veneer as soon as possible for a branch
  // which can at most reach the specified pc.
  bool ShouldEmitVeneer(int max_reachable_pc,
                        int margin = kVeneerDistanceMargin);
1904  }
1905 
  // The maximum code size generated for a veneer. Currently one branch
  // instruction. This is for code size checking purposes, and can be extended
  // in the future, for example, if we decide to add nops between the veneers.
  static const int kMaxVeneerCodeSize = 1 * kInstructionSize;

  void RecordVeneerPool(int location_offset, int size);

  // Emits veneers for branches that are approaching their maximum range.
  // If need_protection is true, the veneers are protected by a branch jumping
  // over the code.
  void EmitVeneers(bool force_emit, bool need_protection,
                   int margin = kVeneerDistanceMargin);

  // Checks whether veneers need to be emitted at this point.
  // If force_emit is set, a veneer is generated for *all* unresolved branches.
  void CheckVeneerPool(bool force_emit, bool require_jump,
                       int margin = kVeneerDistanceMargin);
1922 
1924  public:
1925  explicit BlockPoolsScope(Assembler* assem) : assem_(assem) {
1927  }
1929  assem_->EndBlockPools();
1930  }
1931 
1932  private:
1934 
1936  };
1937 
 protected:
  // Shared emitter helpers used by the public instruction functions above.
  inline const Register& AppropriateZeroRegFor(const CPURegister& reg) const;

  void LoadStore(const CPURegister& rt,
                 const MemOperand& addr,
                 LoadStoreOp op);

  void LoadStorePair(const CPURegister& rt, const CPURegister& rt2,
                     const MemOperand& addr, LoadStorePairOp op);
  static bool IsImmLSPair(int64_t offset, LSDataSize size);

  void Logical(const Register& rd,
               const Register& rn,
               const Operand& operand,
               LogicalOp op);
  void LogicalImmediate(const Register& rd,
                        const Register& rn,
                        unsigned n,
                        unsigned imm_s,
                        unsigned imm_r,
                        LogicalOp op);
1959 
1961  const Operand& operand,
1962  StatusFlags nzcv,
1963  Condition cond,
  static bool IsImmConditionalCompare(int64_t immediate);

  void AddSubWithCarry(const Register& rd,
                       const Register& rn,
                       const Operand& operand,
                       FlagsUpdate S,
                       AddSubWithCarryOp op);

  // Functions for emulating operands not directly supported by the instruction
  // set.
  void EmitShift(const Register& rd,
                 const Register& rn,
                 Shift shift,
                 unsigned amount);
  void EmitExtendShift(const Register& rd,
                       const Register& rn,
                       Extend extend,
                       unsigned left_shift);

  void AddSub(const Register& rd,
              const Register& rn,
              const Operand& operand,
              FlagsUpdate S,
              AddSubOp op);

  static bool IsImmFP32(float imm);
  static bool IsImmFP64(double imm);

  // Find an appropriate LoadStoreOp or LoadStorePairOp for the specified
  // registers. Only simple loads are supported; sign- and zero-extension (such
  // as in LDPSW_x or LDRB_w) are not supported.
  static inline LoadStoreOp LoadOpFor(const CPURegister& rt);
  static inline LoadStorePairOp LoadPairOpFor(const CPURegister& rt,
                                              const CPURegister& rt2);
  static inline LoadStoreOp StoreOpFor(const CPURegister& rt);
  static inline LoadStorePairOp StorePairOpFor(const CPURegister& rt,
                                               const CPURegister& rt2);
2003  const CPURegister& rt, const CPURegister& rt2);
2005  const CPURegister& rt, const CPURegister& rt2);
2006  static inline LoadLiteralOp LoadLiteralOpFor(const CPURegister& rt);
2007 
2008  // Remove the specified branch from the unbound label link chain.
2009  // If available, a veneer for this label can be used for other branches in the
2010  // chain if the link chain cannot be fixed up without this branch.
2012  Label* label,
2013  Instruction* label_veneer = NULL);
2014 
2015  private:
2016  // Instruction helpers.
2017  void MoveWide(const Register& rd,
2018  uint64_t imm,
2019  int shift,
2020  MoveWideImmediateOp mov_op);
2022  const Register& rn,
2023  const Operand& operand,
2024  FlagsUpdate S,
2025  Instr op);
2027  const Register& rn,
2028  const Operand& operand,
2029  FlagsUpdate S,
2030  Instr op);
2032  const CPURegister& rt2,
2033  const MemOperand& addr,
2035  void ConditionalSelect(const Register& rd,
2036  const Register& rn,
2037  const Register& rm,
2038  Condition cond,
2039  ConditionalSelectOp op);
2041  const Register& rn,
2044  const Register& rn,
2045  const Register& rm,
2046  const Register& ra,
2049  const FPRegister& fn,
2052  const FPRegister& fn,
2053  const FPRegister& fm,
2056  const FPRegister& fn,
2057  const FPRegister& fm,
2058  const FPRegister& fa,
2060 
2061  // Label helpers.
2062 
2063  // Return an offset for a label-referencing instruction, typically a branch.
2064  int LinkAndGetByteOffsetTo(Label* label);
2065 
2066  // This is the same as LinkAndGetByteOffsetTo, but return an offset
2067  // suitable for fields that take instruction offsets.
2068  inline int LinkAndGetInstructionOffsetTo(Label* label);
2069 
2070  static const int kStartOfLabelLinkChain = 0;
2071 
2072  // Verify that a label's link chain is intact.
2073  void CheckLabelLinkChain(Label const * label);
2074 
2075  void RecordLiteral(int64_t imm, unsigned size);
2076 
2077  // Postpone the generation of the constant pool for the specified number of
2078  // instructions.
2079  void BlockConstPoolFor(int instructions);
2080 
2081  // Set how far from current pc the next constant pool check will be.
2082  void SetNextConstPoolCheckIn(int instructions) {
2084  }
2085 
  // Emit a single instruction at pc_ and advance pc_ past it.
  void Emit(Instr instruction) {
    // pc_ must be a byte pointer and Instr must be exactly one A64
    // instruction wide, so the memcpy below writes kInstructionSize bytes.
    STATIC_ASSERT(sizeof(*pc_) == 1);
    STATIC_ASSERT(sizeof(instruction) == kInstructionSize);
    DCHECK((pc_ + sizeof(instruction)) <= (buffer_ + buffer_size_));

    memcpy(pc_, &instruction, sizeof(instruction));
    pc_ += sizeof(instruction);
    // The buffer check runs after the write: the assembler keeps slack at
    // the end of the buffer (see kGap), so the write above is in bounds and
    // CheckBuffer() only needs to arrange more space for future emissions.
    CheckBuffer();
  }
2096 
  // Emit raw data inline in the instruction stream, advancing pc_ by 'size'
  // bytes. Used for literals, debug strings, etc.
  void EmitData(void const * data, unsigned size) {
    // pc_ is a byte pointer, so pointer arithmetic below counts bytes.
    DCHECK(sizeof(*pc_) == 1);
    DCHECK((pc_ + size) <= (buffer_ + buffer_size_));

    // TODO(all): Somehow register we have some data here. Then we can
    // disassemble it correctly.
    memcpy(pc_, data, size);
    pc_ += size;
    // As in Emit(), the space check happens after the write; the trailing
    // buffer slack guarantees the write itself did not overrun.
    CheckBuffer();
  }
2108 
2109  void GrowBuffer();
2110  void CheckBufferSpace();
2111  void CheckBuffer();
2112 
2113  // Pc offset of the next constant pool check.
2115 
2116  // Constant pool generation
2117  // Pools are emitted in the instruction stream. They are emitted when:
2118  // * the distance to the first use is above a pre-defined distance or
2119  // * the numbers of entries in the pool is above a pre-defined size or
2120  // * code generation is finished
2121  // If a pool needs to be emitted before code generation is finished a branch
2122  // over the emitted pool will be inserted.
2123 
2124  // Constants in the pool may be addresses of functions that gets relocated;
2125  // if so, a relocation info entry is associated to the constant pool entry.
2126 
2127  // Repeated checking whether the constant pool should be emitted is rather
2128  // expensive. By default we only check again once a number of instructions
2129  // has been generated. That also means that the sizing of the buffers is not
2130  // an exact science, and that we rely on some slop to not overrun buffers.
2131  static const int kCheckConstPoolInterval = 128;
2132 
2133  // Distance to first use after a which a pool will be emitted. Pool entries
2134  // are accessed with pc relative load therefore this cannot be more than
2135  // 1 * MB. Since constant pool emission checks are interval based this value
2136  // is an approximation.
2137  static const int kApproxMaxDistToConstPool = 64 * KB;
2138 
2139  // Number of pool entries after which a pool will be emitted. Since constant
2140  // pool emission checks are interval based this value is an approximation.
2141  static const int kApproxMaxPoolEntryCount = 512;
2142 
2143  // Emission of the constant pool may be blocked in some code sequences.
2144  int const_pool_blocked_nesting_; // Block emission if this is not zero.
2145  int no_const_pool_before_; // Block emission before this pc offset.
2146 
2147  // Emission of the veneer pools may be blocked in some code sequences.
2148  int veneer_pool_blocked_nesting_; // Block emission if this is not zero.
2149 
2150  // Relocation info generation
2151  // Each relocation is encoded as a variable size value
2152  static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
2153  RelocInfoWriter reloc_info_writer;
2154 
2155  // Relocation info records are also used during code generation as temporary
2156  // containers for constants and code target addresses until they are emitted
2157  // to the constant pool. These pending relocation info records are temporarily
2158  // stored in a separate buffer until a constant pool is emitted.
2159  // If every instruction in a long sequence is accessing the pool, we need one
2160  // pending relocation entry per instruction.
2161 
2162  // The pending constant pool.
2164 
2165  // Relocation for a type-recording IC has the AST id added to it. This
2166  // member variable is a way to pass the information from the call site to
2167  // the relocation info.
2169 
2171  inline void ClearRecordedAstId();
2172 
2173  protected:
2174  // Record the AST id of the CallIC being compiled, so that it can be placed
2175  // in the relocation information.
2178  recorded_ast_id_ = ast_id;
2179  }
2180 
2181  // Code generation
2182  // The relocation writer's position is at least kGap bytes below the end of
2183  // the generated instructions. This is so that multi-instruction sequences do
2184  // not have to check for overflow. The same is true for writes of large
2185  // relocation info entries, and debug strings encoded in the instruction
2186  // stream.
2187  static const int kGap = 128;
2188 
2189  public:
2191  public:
2192  FarBranchInfo(int offset, Label* label)
2193  : pc_offset_(offset), label_(label) {}
2194  // Offset of the branch in the code generation buffer.
2196  // The label branched to.
2197  Label* label_;
2198  };
2199 
2200  protected:
2201  // Information about unresolved (forward) branches.
2202  // The Assembler is only allowed to delete out-of-date information from here
2203  // after a label is bound. The MacroAssembler uses this information to
2204  // generate veneers.
2205  //
2206  // The second member gives information about the unresolved branch. The first
2207  // member of the pair is the maximum offset that the branch can reach in the
2208  // buffer. The map is sorted according to this reachable offset, allowing to
2209  // easily check when veneers need to be emitted.
2210  // Note that the maximum reachable offset (first member of the pairs) should
2211  // always be positive but has the same type as the return value for
2212  // pc_offset() for convenience.
2213  std::multimap<int, FarBranchInfo> unresolved_branches_;
2214 
2215  // We generate a veneer for a branch if we reach within this distance of the
2216  // limit of the range.
2217  static const int kVeneerDistanceMargin = 1 * KB;
2218  // The factor of 2 is a finger in the air guess. With a default margin of
2219  // 1KB, that leaves us an addional 256 instructions to avoid generating a
2220  // protective branch.
2221  static const int kVeneerNoProtectionFactor = 2;
2222  static const int kVeneerDistanceCheckMargin =
2225  DCHECK(!unresolved_branches_.empty());
2226  return unresolved_branches_.begin()->first;
2227  }
2228  // This is similar to next_constant_pool_check_ and helps reduce the overhead
2229  // of checking for veneer pools.
2230  // It is maintained to the closest unresolved branch limit minus the maximum
2231  // veneer margin (or kMaxInt if there are no unresolved branches).
2233 
2234  private:
2235  // If a veneer is emitted for a branch instruction, that instruction must be
2236  // removed from the associated label's link chain so that the assembler does
2237  // not later attempt (likely unsuccessfully) to patch it to branch directly to
2238  // the label.
2240  // This function deletes the information related to the label by traversing
2241  // the label chain, and for each PC-relative instruction in the chain checking
2242  // if pending unresolved information exists. Its complexity is proportional to
2243  // the length of the label chain.
2245 
2246  private:
2248  friend class PositionsRecorder;
2249  friend class EnsureSpace;
2250  friend class ConstPool;
2251 };
2252 
2254  public:
2255  // Create an Assembler with a buffer starting at 'start'.
2256  // The buffer size is
2257  // size of instructions to patch + kGap
2258  // Where kGap is the distance from which the Assembler tries to grow the
2259  // buffer.
2260  // If more or fewer instructions than expected are generated or if some
2261  // relocation information takes space in the buffer, the PatchingAssembler
2262  // will crash trying to grow the buffer.
2263  PatchingAssembler(Instruction* start, unsigned count)
2264  : Assembler(NULL,
2265  reinterpret_cast<byte*>(start),
2266  count * kInstructionSize + kGap) {
2267  StartBlockPools();
2268  }
2269 
2270  PatchingAssembler(byte* start, unsigned count)
2271  : Assembler(NULL, start, count * kInstructionSize + kGap) {
2272  // Block constant pool emission.
2273  StartBlockPools();
2274  }
2275 
2277  // Const pool should still be blocked.
2279  EndBlockPools();
2280  // Verify we have generated the number of instruction we expected.
2281  DCHECK((pc_offset() + kGap) == buffer_size_);
2282  // Verify no relocation information has been emitted.
2284  // Flush the Instruction cache.
2285  size_t length = buffer_size_ - kGap;
2287  }
2288 
2289  // See definition of PatchAdrFar() for details.
2290  static const int kAdrFarPatchableNNops = 2;
2292  void PatchAdrFar(int64_t target_offset);
2293 };
2294 
2295 
2296 class EnsureSpace BASE_EMBEDDED {
2297  public:
2298  explicit EnsureSpace(Assembler* assembler) {
2299  assembler->CheckBufferSpace();
2300  }
2301 };
2302 
2303 } } // namespace v8::internal
2304 
2305 #endif // V8_ARM64_ASSEMBLER_ARM64_H_
#define DEFINE_FPREGISTERS(N)
#define REGISTER_CODE_LIST(R)
#define DEFINE_REGISTERS(N)
Isolate * isolate() const
Definition: assembler.h:62
DISALLOW_IMPLICIT_CONSTRUCTORS(BlockConstPoolScope)
FarBranchInfo(int offset, Label *label)
static const int kSpecialTargetSize
static Instr Flags(FlagsUpdate S)
void uxtb(const Register &rd, const Register &rn)
void LogicalImmediate(const Register &rd, const Register &rn, unsigned n, unsigned imm_s, unsigned imm_r, LogicalOp op)
void fabs(const FPRegister &fd, const FPRegister &fn)
void sbcs(const Register &rd, const Register &rn, const Operand &operand)
static Instr SF(Register rd)
void rev32(const Register &rd, const Register &rn)
static Instr RnSP(Register rn)
RelocInfoWriter reloc_info_writer
void RecordLiteral(int64_t imm, unsigned size)
static Instr ImmR(unsigned immr, unsigned reg_size)
void cset(const Register &rd, Condition cond)
void umaddl(const Register &rd, const Register &rn, const Register &rm, const Register &ra)
static Instr ImmTestBranch(int imm14)
void fcvtms(const Register &rd, const FPRegister &fn)
void mul(const Register &rd, const Register &rn, const Register &rm)
void fmov(FPRegister fd, double imm)
static Instr RdSP(Register rd)
static Instr ImmCondCmp(unsigned imm)
void fmov(FPRegister fd, float imm)
static const int kMaxRelocSize
static const int kCheckConstPoolInterval
void fcvtmu(const Register &rd, const FPRegister &fn)
Instruction * pc() const
void csetm(const Register &rd, Condition cond)
static Instr ImmMoveWide(uint64_t imm)
static Instr ImmFP32(float imm)
void DataProcExtendedRegister(const Register &rd, const Register &rn, const Operand &operand, FlagsUpdate S, Instr op)
void ngcs(const Register &rd, const Operand &operand)
void rbit(const Register &rd, const Register &rn)
void frintn(const FPRegister &fd, const FPRegister &fn)
void movn(const Register &rd, uint64_t imm, int shift=-1)
static Instr ImmCmpBranch(int imm19)
static Instr Nzcv(StatusFlags nzcv)
static Instr ImmLSPair(int imm7, LSDataSize size)
void hint(SystemHint code)
static Instr Ra(CPURegister ra)
void bl(int imm26)
void RecordConstPool(int size)
void fnmsub(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm, const FPRegister &fa)
static Address target_address_at(Address pc, ConstantPoolArray *constant_pool)
void eor(const Register &rd, const Register &rn, const Operand &operand)
void smull(const Register &rd, const Register &rn, const Register &rm)
void ConditionalSelect(const Register &rd, const Register &rn, const Register &rm, Condition cond, ConditionalSelectOp op)
void fminnm(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm)
static const int kVeneerDistanceCheckMargin
static Instr ImmPCRelAddress(int imm21)
static Instr ImmException(int imm16)
void stnp(const CPURegister &rt, const CPURegister &rt2, const MemOperand &dst)
void ands(const Register &rd, const Register &rn, const Operand &operand)
void csinc(const Register &rd, const Register &rn, const Register &rm, Condition cond)
void adcs(const Register &rd, const Register &rn, const Operand &operand)
void AddSubWithCarry(const Register &rd, const Register &rn, const Operand &operand, FlagsUpdate S, AddSubWithCarryOp op)
void fmov(FPRegister fd, FPRegister fn)
void br(const Register &xn)
static LoadStoreOp StoreOpFor(const CPURegister &rt)
void DeleteUnresolvedBranchInfoForLabelTraverse(Label *label)
void str(const CPURegister &rt, const MemOperand &dst)
void cmn(const Register &rn, const Operand &operand)
TypeFeedbackId RecordedAstId()
void fcmp(const FPRegister &fn, const FPRegister &fm)
void extr(const Register &rd, const Register &rn, const Register &rm, unsigned lsb)
uint64_t SizeOfCodeGeneratedSince(const Label *label)
void RecordVeneerPool(int location_offset, int size)
void ubfm(const Register &rd, const Register &rn, unsigned immr, unsigned imms)
static bool IsConstantPoolAt(Instruction *instr)
void fcvtzu(const Register &rd, const FPRegister &fn)
static Instr Cond(Condition cond)
void strh(const Register &rt, const MemOperand &dst)
static Instr Rm(CPURegister rm)
static Instr Rd(CPURegister rd)
void sxtw(const Register &rd, const Register &rn)
void fmax(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm)
void mneg(const Register &rd, const Register &rn, const Register &rm)
void mvn(const Register &rd, const Operand &operand)
static Instr ImmExtendShift(unsigned left_shift)
uint64_t SizeOfGeneratedCode() const
void dci(Instr raw_inst)
void bfm(const Register &rd, const Register &rn, unsigned immr, unsigned imms)
void bind(Label *label)
void ccmn(const Register &rn, const Operand &operand, StatusFlags nzcv, Condition cond)
void udiv(const Register &rd, const Register &rn, const Register &rm)
TypeFeedbackId recorded_ast_id_
void DataProcessing3Source(const Register &rd, const Register &rn, const Register &rm, const Register &ra, DataProcessing3SourceOp op)
static LoadStorePairNonTemporalOp LoadPairNonTemporalOpFor(const CPURegister &rt, const CPURegister &rt2)
static const int kVeneerDistanceMargin
void subs(const Register &rd, const Register &rn, const Operand &operand)
void uxth(const Register &rd, const Register &rn)
void asrv(const Register &rd, const Register &rn, const Register &rm)
static const int kDebugBreakSlotInstructions
void GetCode(CodeDesc *desc)
void ldr_pcrel(const CPURegister &rt, int imm19)
void bfi(const Register &rd, const Register &rn, unsigned lsb, unsigned width)
void db(uint8_t data)
void ror(const Register &rd, const Register &rs, unsigned shift)
void fcvtnu(const Register &rd, const FPRegister &fn)
void CheckConstPool(bool force_emit, bool require_jump)
static const int kPatchDebugBreakSlotAddressOffset
void ngc(const Register &rd, const Operand &operand)
void madd(const Register &rd, const Register &rn, const Register &rm, const Register &ra)
ptrdiff_t InstructionOffset(Instruction *instr) const
static LoadLiteralOp LoadLiteralOpFor(const CPURegister &rt)
void b(Label *label, Condition cond)
static Instr ShiftMoveWide(int64_t shift)
static Instr ImmLLiteral(int imm19)
void sxth(const Register &rd, const Register &rn)
void neg(const Register &rd, const Operand &operand)
void AssertSizeOfCodeGeneratedSince(const Label *label, ptrdiff_t size)
void tbz(const Register &rt, unsigned bit_pos, Label *label)
void ldrsw(const Register &rt, const MemOperand &src)
static LoadStorePairOp LoadPairOpFor(const CPURegister &rt, const CPURegister &rt2)
void ubfx(const Register &rd, const Register &rn, unsigned lsb, unsigned width)
static Instr Rt(CPURegister rt)
static const int kStartOfLabelLinkChain
void frinta(const FPRegister &fd, const FPRegister &fn)
void fdiv(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm)
void fcvtzs(const Register &rd, const FPRegister &fn)
static Instr ImmShiftLS(unsigned shift_amount)
static const int kPatchDebugBreakSlotReturnOffset
static Instr ImmSystemRegister(int imm15)
void BlockConstPoolFor(int instructions)
void lslv(const Register &rd, const Register &rn, const Register &rm)
void cmp(const Register &rn, const Operand &operand)
void mov(const Register &rd, const Register &rn)
static Instr ImmLS(int imm9)
static Address break_address_from_return_address(Address pc)
static const int kGap
void EmitExtendShift(const Register &rd, const Register &rn, Extend extend, unsigned left_shift)
void lsr(const Register &rd, const Register &rn, unsigned shift)
void strb(const Register &rt, const MemOperand &dst)
static Instr ImmSetBits(unsigned imms, unsigned reg_size)
void msr(SystemRegister sysreg, const Register &rt)
void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data=0)
static bool IsImmAddSub(int64_t immediate)
static Instr ImmDPShift(unsigned amount)
void shift(Register dst, Immediate shift_amount, int subcode, int size)
Assembler(Isolate *arg_isolate, void *buffer, int buffer_size)
void fadd(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm)
void msub(const Register &rd, const Register &rn, const Register &rm, const Register &ra)
void fmin(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm)
void cneg(const Register &rd, const Register &rn, Condition cond)
void dsb(BarrierDomain domain, BarrierType type)
void DeleteUnresolvedBranchInfoForLabel(Label *label)
void adc(const Register &rd, const Register &rn, const Operand &operand)
static Instr ImmFP64(double imm)
void DataProcShiftedRegister(const Register &rd, const Register &rn, const Operand &operand, FlagsUpdate S, Instr op)
void add(const Register &rd, const Register &rn, const Operand &operand)
void ldrb(const Register &rt, const MemOperand &src)
static Instr FPType(FPRegister fd)
void ldrsb(const Register &rt, const MemOperand &src)
void fcvtns(const Register &rd, const FPRegister &fn)
void dmb(BarrierDomain domain, BarrierType type)
void dc32(uint32_t data)
void fcsel(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm, Condition cond)
void cls(const Register &rd, const Register &rn)
bool ShouldEmitVeneers(int margin=kVeneerDistanceMargin)
void fmov(FPRegister fd, Register rn)
static const int kApproxMaxPoolEntryCount
void sbfm(const Register &rd, const Register &rn, unsigned immr, unsigned imms)
static Instr Rt2(CPURegister rt2)
void rorv(const Register &rd, const Register &rn, const Register &rm)
void fsqrt(const FPRegister &fd, const FPRegister &fn)
static Instr ImmBarrierType(int imm2)
void ConditionalCompare(const Register &rn, const Operand &operand, StatusFlags nzcv, Condition cond, ConditionalCompareOp op)
void smaddl(const Register &rd, const Register &rn, const Register &rm, const Register &ra)
static bool IsImmLSScaled(int64_t offset, LSDataSize size)
void RemoveBranchFromLabelLinkChain(Instruction *branch, Label *label, Instruction *label_veneer=NULL)
void Emit(Instr instruction)
void fmaxnm(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm)
int InstructionsGeneratedSince(const Label *label)
void scvtf(const FPRegister &fd, const Register &rn, unsigned fbits=0)
Handle< ConstantPoolArray > NewConstantPool(Isolate *isolate)
int unresolved_branches_first_limit() const
static const int kCallSizeWithoutRelocation
void ldp(const CPURegister &rt, const CPURegister &rt2, const MemOperand &src)
static Instr ExtendMode(Extend extend)
void smsubl(const Register &rd, const Register &rn, const Register &rm, const Register &ra)
static bool IsImmLSPair(int64_t offset, LSDataSize size)
void LoadStore(const CPURegister &rt, const MemOperand &addr, LoadStoreOp op)
void EmitVeneers(bool force_emit, bool need_protection, int margin=kVeneerDistanceMargin)
void bl(Label *label)
void fneg(const FPRegister &fd, const FPRegister &fn)
void dc64(uint64_t data)
void RecordComment(const char *msg)
void FPDataProcessing3Source(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm, const FPRegister &fa, FPDataProcessing3SourceOp op)
int LinkAndGetByteOffsetTo(Label *label)
void frintz(const FPRegister &fd, const FPRegister &fn)
void adr(const Register &rd, Label *label)
friend class PositionsRecorder
void adr(const Register &rd, int imm21)
static Instr ImmS(unsigned imms, unsigned reg_size)
static Instr ImmRotate(unsigned immr, unsigned reg_size)
static Instr ShiftDP(Shift shift)
void FPConvertToInt(const Register &rd, const FPRegister &fn, FPIntegerConvertOp op)
static LSDataSize CalcLSDataSize(LoadStoreOp op)
void orr(const Register &rd, const Register &rn, const Operand &operand)
static const int kMaxVeneerCodeSize
bool is_const_pool_blocked() const
void lsl(const Register &rd, const Register &rn, unsigned shift)
void cinc(const Register &rd, const Register &rn, Condition cond)
static Address target_pointer_address_at(Address pc)
void smulh(const Register &rd, const Register &rn, const Register &rm)
void fmul(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm)
void sub(const Register &rd, const Register &rn, const Operand &operand)
void mov(Register dst, const Operand &src, SBit s=LeaveCC, Condition cond=al)
void csel(const Register &rd, const Register &rn, const Register &rm, Condition cond)
static void set_target_address_at(Address pc, ConstantPoolArray *constant_pool, Address target, ICacheFlushMode icache_flush_mode=FLUSH_ICACHE_IF_NEEDED)
void fnmadd(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm, const FPRegister &fa)
static Address target_address_from_return_address(Address pc)
void tst(const Register &rn, const Operand &operand)
void SetRecordedAstId(TypeFeedbackId ast_id)
void ldrsh(const Register &rt, const MemOperand &src)
void ldnp(const CPURegister &rt, const CPURegister &rt2, const MemOperand &src)
void tbz(const Register &rt, unsigned bit_pos, int imm14)
void bics(const Register &rd, const Register &rn, const Operand &operand)
static Instr ImmCondBranch(int imm19)
void and_(const Register &rd, const Register &rn, const Operand &operand)
void ucvtf(const FPRegister &fd, const Register &rn, unsigned fbits=0)
void ldr(const CPURegister &rt, const Immediate &imm)
void eon(const Register &rd, const Register &rn, const Operand &operand)
static LoadStorePairNonTemporalOp StorePairNonTemporalOpFor(const CPURegister &rt, const CPURegister &rt2)
void movz(const Register &rd, uint64_t imm, int shift=-1)
static Instr BitN(unsigned bitn, unsigned reg_size)
void b(int imm19, Condition cond)
void cbnz(const Register &rt, int imm19)
void tbnz(const Register &rt, unsigned bit_pos, Label *label)
static Address return_address_from_call_start(Address pc)
void ccmp(const Register &rn, const Operand &operand, StatusFlags nzcv, Condition cond)
void rev16(const Register &rd, const Register &rn)
void bfxil(const Register &rd, const Register &rn, unsigned lsb, unsigned width)
void MoveWide(const Register &rd, uint64_t imm, int shift, MoveWideImmediateOp mov_op)
void fcvt(const FPRegister &fd, const FPRegister &fn)
void EmitShift(const Register &rd, const Register &rn, Shift shift, unsigned amount)
static const int kDebugBreakSlotLength
static Instr ImmHint(int imm7)
static LoadStorePairOp StorePairOpFor(const CPURegister &rt, const CPURegister &rt2)
void adds(const Register &rd, const Register &rn, const Operand &operand)
void LoadStorePairNonTemporal(const CPURegister &rt, const CPURegister &rt2, const MemOperand &addr, LoadStorePairNonTemporalOp op)
void sbfiz(const Register &rd, const Register &rn, unsigned lsb, unsigned width)
void movk(const Register &rd, uint64_t imm, int shift=-1)
void frintm(const FPRegister &fd, const FPRegister &fn)
int LinkAndGetInstructionOffsetTo(Label *label)
static Instr Rn(CPURegister rn)
void orn(const Register &rd, const Register &rn, const Operand &operand)
void CheckVeneerPool(bool force_emit, bool require_jump, int margin=kVeneerDistanceMargin)
void bic(const Register &rd, const Register &rn, const Operand &operand)
void cinv(const Register &rd, const Register &rn, Condition cond)
static void deserialization_set_special_target_at(Address constant_pool_entry, Code *code, Address target)
void EmitStringData(const char *string)
void fcvtau(const Register &rd, const FPRegister &fn)
STATIC_ASSERT(kPointerSize==kInt64Size||kPointerSize==kInt32Size)
std::multimap< int, FarBranchInfo > unresolved_branches_
void blr(const Register &xn)
virtual void AbortedCodeGeneration()
static const int kJSRetSequenceInstructions
static LoadStoreOp LoadOpFor(const CPURegister &rt)
void fsub(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm)
void SetNextConstPoolCheckIn(int instructions)
Instruction * InstructionAt(int offset) const
static const int kPatchReturnSequenceAddressOffset
void nop(NopMarkerTypes n)
void tbnz(const Register &rt, unsigned bit_pos, int imm14)
void fmadd(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm, const FPRegister &fa)
void DataProcessing1Source(const Register &rd, const Register &rn, DataProcessing1SourceOp op)
void LoadStorePair(const CPURegister &rt, const CPURegister &rt2, const MemOperand &addr, LoadStorePairOp op)
void stp(const CPURegister &rt, const CPURegister &rt2, const MemOperand &dst)
void fcmp(const FPRegister &fn, double value)
static Instr FPScale(unsigned scale)
static const int kVeneerNoProtectionFactor
void negs(const Register &rd, const Operand &operand)
void fcvtas(const Register &rd, const FPRegister &fn)
static Instr ImmUncondBranch(int imm26)
void cbnz(const Register &rt, Label *label)
void dd(uint32_t data)
static Instr ImmAddSub(int64_t imm)
void AddSub(const Register &rd, const Register &rn, const Operand &operand, FlagsUpdate S, AddSubOp op)
void cbz(const Register &rt, int imm19)
static bool IsImmLogical(uint64_t value, unsigned width, unsigned *n, unsigned *imm_s, unsigned *imm_r)
static bool IsImmFP64(double imm)
void cbz(const Register &rt, Label *label)
void fmsub(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm, const FPRegister &fa)
void CheckLabelLinkChain(Label const *label)
void uxtw(const Register &rd, const Register &rn)
void fccmp(const FPRegister &fn, const FPRegister &fm, StatusFlags nzcv, Condition cond)
PositionsRecorder positions_recorder_
void fmov(Register rd, FPRegister fn)
void clz(const Register &rd, const Register &rn)
PositionsRecorder * positions_recorder()
void sdiv(const Register &rd, const Register &rn, const Register &rm)
void mrs(const Register &rt, SystemRegister sysreg)
static Instr ImmLSUnsigned(int imm12)
void EmitData(void const *data, unsigned size)
static const int kCallSizeWithRelocation
void sxtb(const Register &rd, const Register &rn)
void debug(const char *message, uint32_t code, Instr params=BREAK)
void ubfiz(const Register &rd, const Register &rn, unsigned lsb, unsigned width)
void rev(const Register &rd, const Register &rn)
int SizeOfCodeGeneratedSince(Label *label)
void ret(const Register &xn=lr)
void asr(const Register &rd, const Register &rn, unsigned shift)
void umsubl(const Register &rd, const Register &rn, const Register &rm, const Register &ra)
void sbc(const Register &rd, const Register &rn, const Operand &operand)
void FPDataProcessing2Source(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm, FPDataProcessing2SourceOp op)
void Logical(const Register &rd, const Register &rn, const Operand &operand, LogicalOp op)
static bool IsImmConditionalCompare(int64_t immediate)
bool is_veneer_pool_blocked() const
static bool IsImmFP32(float imm)
static Instr ImmTestBranchBit(unsigned bit_pos)
void ldrh(const Register &rt, const MemOperand &src)
static Instr ImmBarrierDomain(int imm2)
void ldr(const CPURegister &rt, const MemOperand &src)
void dc8(uint8_t data)
void FPDataProcessing1Source(const FPRegister &fd, const FPRegister &fn, FPDataProcessing1SourceOp op)
void b(Label *label)
const Register & AppropriateZeroRegFor(const CPURegister &reg) const
static const int kApproxMaxDistToConstPool
static int ConstantPoolSizeAt(Instruction *instr)
void PopulateConstantPool(ConstantPoolArray *constant_pool)
void csinv(const Register &rd, const Register &rn, const Register &rm, Condition cond)
static bool IsImmLSUnscaled(int64_t offset)
void csneg(const Register &rd, const Register &rn, const Register &rm, Condition cond)
bool ShouldEmitVeneer(int max_reachable_pc, int margin=kVeneerDistanceMargin)
void lsrv(const Register &rd, const Register &rn, const Register &rm)
void sbfx(const Register &rd, const Register &rn, unsigned lsb, unsigned width)
void ldpsw(const Register &rt, const Register &rt2, const MemOperand &src)
EnsureSpace(Assembler *assembler)
static CPURegList GetCallerSavedFP(unsigned size=kDRegSizeInBits)
void Remove(const CPURegList &other)
static CPURegList GetCallerSaved(unsigned size=kXRegSizeInBits)
bool IncludesAliasOf(const CPURegister &other1, const CPURegister &other2=NoCPUReg, const CPURegister &other3=NoCPUReg, const CPURegister &other4=NoCPUReg) const
static CPURegList GetSafepointSavedRegisters()
CPURegister PopHighestIndex()
CPURegister::RegisterType type() const
unsigned RegisterSizeInBits() const
CPURegList(CPURegister::RegisterType type, unsigned size, unsigned first_reg, unsigned last_reg)
CPURegister::RegisterType type_
CPURegList(CPURegister::RegisterType type, unsigned size, RegList list)
unsigned RegisterSizeInBytes() const
CPURegister PopLowestIndex()
static CPURegList GetCalleeSaved(unsigned size=kXRegSizeInBits)
unsigned TotalSizeInBytes() const
static CPURegList GetCalleeSavedFP(unsigned size=kDRegSizeInBits)
void Combine(const CPURegList &other)
void set_list(RegList new_list)
CPURegList(CPURegister reg1, CPURegister reg2=NoCPUReg, CPURegister reg3=NoCPUReg, CPURegister reg4=NoCPUReg)
ConstPool(Assembler *assm)
std::multimap< uint64_t, int > shared_entries_
void RecordEntry(intptr_t data, RelocInfo::Mode mode)
int SizeIfEmittedAtCurrentPc(bool require_jump)
void Emit(bool require_jump)
std::vector< std::pair< uint64_t, int > > unique_entries_
bool CanBeShared(RelocInfo::Mode mode)
static void FlushICache(void *start, size_t size)
Immediate(Handle< T > handle)
void InitializeHandle(Handle< Object > value)
RelocInfo::Mode rmode() const
static Instruction * Cast(T src)
const Register & regoffset() const
AddrMode addrmode() const
const Register & base() const
unsigned shift_amount() const
static PairResult AreConsistentForPair(const MemOperand &operandA, const MemOperand &operandB, int access_size_log2=kXRegSizeLog2)
Operand(Register reg, Shift shift=LSL, unsigned shift_amount=0)
int64_t ImmediateValue() const
unsigned shift_amount() const
bool NeedsRelocation(const Assembler *assembler) const
static Operand UntagSmiAndScale(Register smi, int scale)
static Operand UntagSmi(Register smi)
Operand ToExtendedRegister() const
Immediate immediate() const
PatchingAssembler(Instruction *start, unsigned count)
static const int kAdrFarPatchableNInstrs
PatchingAssembler(byte *start, unsigned count)
void PatchAdrFar(int64_t target_offset)
enable harmony numeric enable harmony object literal extensions Optimize object size
enable harmony numeric enable harmony object literal extensions Optimize object Array DOM strings and string trace pretenuring decisions of HAllocate instructions Enables optimizations which favor memory size over execution speed maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining trace the tracking of allocation sites deoptimize every n garbage collections perform array bounds checks elimination analyze liveness of environment slots and zap dead values flushes the cache of optimized code for closures on every GC allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes enable context specialization in TurboFan execution budget before interrupt is triggered max percentage of megamorphic generic ICs to allow optimization enable use of SAHF instruction if enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable use of MLS instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long mode(MIPS only)") DEFINE_BOOL(enable_always_align_csp
enable harmony numeric enable harmony object literal extensions Optimize object Array DOM strings and string trace pretenuring decisions of HAllocate instructions Enables optimizations which favor memory size over execution speed maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining trace the tracking of allocation sites deoptimize every n garbage collections perform array bounds checks elimination analyze liveness of environment slots and zap dead values flushes the cache of optimized code for closures on every GC allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes enable context specialization in TurboFan execution budget before interrupt is triggered max percentage of megamorphic generic ICs to allow optimization enable use of SAHF instruction if enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable use of MLS instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long enable alignment of csp to bytes on platforms which prefer the register to always be NULL
#define UNREACHABLE()
Definition: logging.h:30
#define DCHECK(condition)
Definition: logging.h:205
bool AreSameSizeAndType(const CPURegister &reg1, const CPURegister &reg2, const CPURegister &reg3=NoCPUReg, const CPURegister &reg4=NoCPUReg, const CPURegister &reg5=NoCPUReg, const CPURegister &reg6=NoCPUReg, const CPURegister &reg7=NoCPUReg, const CPURegister &reg8=NoCPUReg)
const unsigned kJSSPCode
const int kPointerSize
Definition: globals.h:129
static const int kRegListSizeInBits
const int KB
Definition: globals.h:106
const unsigned kDRegSizeInBits
const int kSmiShift
bool AreAliased(const CPURegister &reg1, const CPURegister &reg2, const CPURegister &reg3=NoReg, const CPURegister &reg4=NoReg, const CPURegister &reg5=NoReg, const CPURegister &reg6=NoReg, const CPURegister &reg7=NoReg, const CPURegister &reg8=NoReg)
const Register cp
const unsigned kXRegSizeInBits
const unsigned kSPRegInternalCode
const int kSmiTagSize
Definition: v8.h:5743
const DwVfpRegister d31
const Register fp
DwVfpRegister DoubleRegister
Register GetAllocatableRegisterThatIsNotOneOf(Register reg1, Register reg2=NoReg, Register reg3=NoReg, Register reg4=NoReg)
const unsigned kWRegSizeInBits
Handle< T > handle(T *t, Isolate *isolate)
Definition: handles.h:146
const unsigned kNumberOfFPRegisters
const unsigned kRegCodeMask
const DwVfpRegister d29
INITIALIZE_REGISTER(Register, NoReg, 0, 0, CPURegister::kNoRegister)
const uint64_t kSmiShiftMask
uint32_t RegList
Definition: frames.h:18
const Register lr
byte * Address
Definition: globals.h:101
const unsigned kXRegSizeLog2
const int kSmiShiftSize
Definition: v8.h:5805
const Register no_reg
STATIC_ASSERT(sizeof(CPURegister)==sizeof(Register))
const int kBitsPerByte
Definition: globals.h:162
int CountSetBits(uint64_t value, int width)
@ FLUSH_ICACHE_IF_NEEDED
Definition: assembler.h:293
const DwVfpRegister d30
const unsigned kNumberOfRegisters
const unsigned kInstructionSize
const LowDwVfpRegister d15
uint8_t byte
Definition: globals.h:100
ALIAS_REGISTER(Register, ip0, x16)
Debugger support for the V8 JavaScript engine.
Definition: accessors.cc:20
bool Aliases(const CPURegister &other) const
RegisterType type() const
bool Is(const CPURegister &other) const
static CPURegister Create(unsigned code, unsigned size, RegisterType type)
bool IsSameSizeAndType(const CPURegister &other) const
bool is(const CPURegister &other) const
static const unsigned kAllocatableHighRangeBegin
static const unsigned kAllocatableHighRangeEnd
static FPRegister SRegFromCode(unsigned code)
static const int kAllocatableRangeGapSize
static FPRegister from_code(int code)
static const int kMaxNumRegisters
FPRegister(const FPRegister &r)
static const unsigned kAllocatableLowRangeBegin
static const char * AllocationIndexToString(int index)
static int NumAllocatableRegisters()
static const int kMaxNumAllocatableRegisters
static FPRegister Create(unsigned code, unsigned size)
static const unsigned kAllocatableLowRangeEnd
static FPRegister DRegFromCode(unsigned code)
static const RegList kAllocatableFPRegisters
static int NumAllocatableAliasedRegisters()
static int ToAllocationIndex(FPRegister reg)
FPRegister(const CPURegister &r)
static FPRegister FromAllocationIndex(unsigned int index)
static const int kNumRegisters
Definition: assembler-arm.h:95
static const int kAllocatableRangeGapSize
static int NumAllocatableRegisters()
static const unsigned kAllocatableHighRangeBegin
static const unsigned kAllocatableLowRangeEnd
static const unsigned kAllocatableHighRangeEnd
static Register WRegFromCode(unsigned code)
static Register from_code(int code)
static int ToAllocationIndex(Register reg)
Register(const Register &r)
static Register XRegFromCode(unsigned code)
static const char * AllocationIndexToString(int index)
static int NumAllocatableRegisters()
static Register Create(unsigned code, unsigned size)
static Register FromAllocationIndex(unsigned index)
static const int kMaxNumAllocatableRegisters
Definition: assembler-arm.h:96
static const unsigned kAllocatableContext
static const unsigned kAllocatableLowRangeBegin
Register(const CPURegister &r)
#define T(name, string, precedence)
Definition: token.cc:25