V8 Project
assembler-arm64-inl.h
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_ARM64_ASSEMBLER_ARM64_INL_H_
#define V8_ARM64_ASSEMBLER_ARM64_INL_H_

#include "src/arm64/assembler-arm64.h"
#include "src/assembler.h"
#include "src/debug.h"


namespace v8 {
namespace internal {


bool CpuFeatures::SupportsCrankshaft() { return true; }


void RelocInfo::apply(intptr_t delta, ICacheFlushMode icache_flush_mode) {
  UNIMPLEMENTED();
}


void RelocInfo::set_target_address(Address target,
                                   WriteBarrierMode write_barrier_mode,
                                   ICacheFlushMode icache_flush_mode) {
  DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
  Assembler::set_target_address_at(pc_, host_, target, icache_flush_mode);
  if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL &&
      IsCodeTarget(rmode_)) {
    Object* target_code = Code::GetCodeFromTargetAddress(target);
    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
        host(), this, HeapObject::cast(target_code));
  }
}


inline unsigned CPURegister::code() const {
  DCHECK(IsValid());
  return reg_code;
}


inline CPURegister::RegisterType CPURegister::type() const {
  DCHECK(IsValidOrNone());
  return reg_type;
}


inline RegList CPURegister::Bit() const {
  DCHECK(reg_code < (sizeof(RegList) * kBitsPerByte));
  return IsValid() ? 1UL << reg_code : 0;
}


inline unsigned CPURegister::SizeInBits() const {
  DCHECK(IsValid());
  return reg_size;
}


inline int CPURegister::SizeInBytes() const {
  DCHECK(IsValid());
  DCHECK(SizeInBits() % 8 == 0);
  return reg_size / 8;
}


inline bool CPURegister::Is32Bits() const {
  DCHECK(IsValid());
  return reg_size == 32;
}


inline bool CPURegister::Is64Bits() const {
  DCHECK(IsValid());
  return reg_size == 64;
}


inline bool CPURegister::IsValid() const {
  if (IsValidRegister() || IsValidFPRegister()) {
    DCHECK(!IsNone());
    return true;
  } else {
    DCHECK(IsNone());
    return false;
  }
}


inline bool CPURegister::IsValidRegister() const {
  return IsRegister() &&
         ((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits)) &&
         ((reg_code < kNumberOfRegisters) || (reg_code == kSPRegInternalCode));
}


inline bool CPURegister::IsValidFPRegister() const {
  return IsFPRegister() &&
         ((reg_size == kSRegSizeInBits) || (reg_size == kDRegSizeInBits)) &&
         (reg_code < kNumberOfFPRegisters);
}


inline bool CPURegister::IsNone() const {
  // kNoRegister types should always have size 0 and code 0.
  DCHECK((reg_type != kNoRegister) || (reg_code == 0));
  DCHECK((reg_type != kNoRegister) || (reg_size == 0));

  return reg_type == kNoRegister;
}


inline bool CPURegister::Is(const CPURegister& other) const {
  DCHECK(IsValidOrNone() && other.IsValidOrNone());
  return Aliases(other) && (reg_size == other.reg_size);
}


inline bool CPURegister::Aliases(const CPURegister& other) const {
  DCHECK(IsValidOrNone() && other.IsValidOrNone());
  return (reg_code == other.reg_code) && (reg_type == other.reg_type);
}


inline bool CPURegister::IsRegister() const {
  return reg_type == kRegister;
}


inline bool CPURegister::IsFPRegister() const {
  return reg_type == kFPRegister;
}


inline bool CPURegister::IsSameSizeAndType(const CPURegister& other) const {
  return (reg_size == other.reg_size) && (reg_type == other.reg_type);
}


inline bool CPURegister::IsValidOrNone() const {
  return IsValid() || IsNone();
}


inline bool CPURegister::IsZero() const {
  DCHECK(IsValid());
  return IsRegister() && (reg_code == kZeroRegCode);
}


inline bool CPURegister::IsSP() const {
  DCHECK(IsValid());
  return IsRegister() && (reg_code == kSPRegInternalCode);
}


inline void CPURegList::Combine(const CPURegList& other) {
  DCHECK(IsValid());
  DCHECK(other.type() == type_);
  DCHECK(other.RegisterSizeInBits() == size_);
  list_ |= other.list();
}


inline void CPURegList::Remove(const CPURegList& other) {
  DCHECK(IsValid());
  if (other.type() == type_) {
    list_ &= ~other.list();
  }
}


inline void CPURegList::Combine(const CPURegister& other) {
  DCHECK(other.type() == type_);
  DCHECK(other.SizeInBits() == size_);
  Combine(other.code());
}


inline void CPURegList::Remove(const CPURegister& other1,
                               const CPURegister& other2,
                               const CPURegister& other3,
                               const CPURegister& other4) {
  if (!other1.IsNone() && (other1.type() == type_)) Remove(other1.code());
  if (!other2.IsNone() && (other2.type() == type_)) Remove(other2.code());
  if (!other3.IsNone() && (other3.type() == type_)) Remove(other3.code());
  if (!other4.IsNone() && (other4.type() == type_)) Remove(other4.code());
}


inline void CPURegList::Combine(int code) {
  DCHECK(IsValid());
  DCHECK(CPURegister::Create(code, size_, type_).IsValid());
  list_ |= (1UL << code);
}


inline void CPURegList::Remove(int code) {
  DCHECK(IsValid());
  DCHECK(CPURegister::Create(code, size_, type_).IsValid());
  list_ &= ~(1UL << code);
}


inline Register Register::XRegFromCode(unsigned code) {
  if (code == kSPRegInternalCode) {
    return csp;
  } else {
    DCHECK(code < kNumberOfRegisters);
    return Register::Create(code, kXRegSizeInBits);
  }
}


inline Register Register::WRegFromCode(unsigned code) {
  if (code == kSPRegInternalCode) {
    return wcsp;
  } else {
    DCHECK(code < kNumberOfRegisters);
    return Register::Create(code, kWRegSizeInBits);
  }
}


inline FPRegister FPRegister::SRegFromCode(unsigned code) {
  DCHECK(code < kNumberOfFPRegisters);
  return FPRegister::Create(code, kSRegSizeInBits);
}


inline FPRegister FPRegister::DRegFromCode(unsigned code) {
  DCHECK(code < kNumberOfFPRegisters);
  return FPRegister::Create(code, kDRegSizeInBits);
}


inline Register CPURegister::W() const {
  DCHECK(IsValidRegister());
  return Register::WRegFromCode(reg_code);
}


inline Register CPURegister::X() const {
  DCHECK(IsValidRegister());
  return Register::XRegFromCode(reg_code);
}


inline FPRegister CPURegister::S() const {
  DCHECK(IsValidFPRegister());
  return FPRegister::SRegFromCode(reg_code);
}


inline FPRegister CPURegister::D() const {
  DCHECK(IsValidFPRegister());
  return FPRegister::DRegFromCode(reg_code);
}


// Immediate.
// Default initializer is for int types
template<typename T>
struct ImmediateInitializer {
  static const bool kIsIntType = true;
  static inline RelocInfo::Mode rmode_for(T) {
    return sizeof(T) == 8 ? RelocInfo::NONE64 : RelocInfo::NONE32;
  }
  static inline int64_t immediate_for(T t) {
    STATIC_ASSERT(sizeof(T) <= 8);
    return t;
  }
};


template<>
struct ImmediateInitializer<Smi*> {
  static const bool kIsIntType = false;
  static inline RelocInfo::Mode rmode_for(Smi* t) {
    return RelocInfo::NONE64;
  }
  static inline int64_t immediate_for(Smi* t) {
    return reinterpret_cast<int64_t>(t);
  }
};


template<>
struct ImmediateInitializer<ExternalReference> {
  static const bool kIsIntType = false;
  static inline RelocInfo::Mode rmode_for(ExternalReference t) {
    return RelocInfo::EXTERNAL_REFERENCE;
  }
  static inline int64_t immediate_for(ExternalReference t) {
    return reinterpret_cast<int64_t>(t.address());
  }
};


template<typename T>
Immediate::Immediate(Handle<T> value) {
  InitializeHandle(value);
}


template<typename T>
Immediate::Immediate(T t)
    : value_(ImmediateInitializer<T>::immediate_for(t)),
      rmode_(ImmediateInitializer<T>::rmode_for(t)) {}


template<typename T>
Immediate::Immediate(T t, RelocInfo::Mode rmode)
    : value_(ImmediateInitializer<T>::immediate_for(t)),
      rmode_(rmode) {
  STATIC_ASSERT(ImmediateInitializer<T>::kIsIntType);
}


// Operand.
template<typename T>
Operand::Operand(Handle<T> value) : immediate_(value), reg_(NoReg) {}


template<typename T>
Operand::Operand(T t) : immediate_(t), reg_(NoReg) {}


template<typename T>
Operand::Operand(T t, RelocInfo::Mode rmode)
    : immediate_(t, rmode),
      reg_(NoReg) {}


Operand::Operand(Register reg, Shift shift, unsigned shift_amount)
    : immediate_(0),
      reg_(reg),
      shift_(shift),
      extend_(NO_EXTEND),
      shift_amount_(shift_amount) {
  DCHECK(reg.Is64Bits() || (shift_amount < kWRegSizeInBits));
  DCHECK(reg.Is32Bits() || (shift_amount < kXRegSizeInBits));
  DCHECK(!reg.IsSP());
}


Operand::Operand(Register reg, Extend extend, unsigned shift_amount)
    : immediate_(0),
      reg_(reg),
      shift_(NO_SHIFT),
      extend_(extend),
      shift_amount_(shift_amount) {
  DCHECK(reg.IsValid());
  DCHECK(shift_amount <= 4);
  DCHECK(!reg.IsSP());

  // Extend modes SXTX and UXTX require a 64-bit register.
  DCHECK(reg.Is64Bits() || ((extend != SXTX) && (extend != UXTX)));
}

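
// Editor's note (not part of the original source): typical uses of these
// constructors are Operand(x1, LSL, 4) for a shifted-register operand
// ("x1 << 4") and Operand(w2, UXTW) for an extended-register operand that
// zero-extends w2 to 64 bits before use.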

bool Operand::IsImmediate() const {
  return reg_.Is(NoReg);
}


bool Operand::IsShiftedRegister() const {
  return reg_.IsValid() && (shift_ != NO_SHIFT);
}


bool Operand::IsExtendedRegister() const {
  return reg_.IsValid() && (extend_ != NO_EXTEND);
}


bool Operand::IsZero() const {
  if (IsImmediate()) {
    return ImmediateValue() == 0;
  } else {
    return reg().IsZero();
  }
}


Operand Operand::ToExtendedRegister() const {
  DCHECK(IsShiftedRegister());
  DCHECK((shift_ == LSL) && (shift_amount_ <= 4));
  return Operand(reg_, reg_.Is64Bits() ? UXTX : UXTW, shift_amount_);
}
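
// Editor's note (not part of the original source): a plain "LSL #n" operand
// with n <= 4 has an equivalent extended-register form, UXTX #n for 64-bit
// registers or UXTW #n for 32-bit ones. ToExtendedRegister() performs that
// rewrite for instructions that only accept the extended form.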


Immediate Operand::immediate() const {
  DCHECK(IsImmediate());
  return immediate_;
}


int64_t Operand::ImmediateValue() const {
  DCHECK(IsImmediate());
  return immediate_.value();
}


Register Operand::reg() const {
  DCHECK(IsShiftedRegister() || IsExtendedRegister());
  return reg_;
}


Shift Operand::shift() const {
  DCHECK(IsShiftedRegister());
  return shift_;
}


Extend Operand::extend() const {
  DCHECK(IsExtendedRegister());
  return extend_;
}


unsigned Operand::shift_amount() const {
  DCHECK(IsShiftedRegister() || IsExtendedRegister());
  return shift_amount_;
}


Operand Operand::UntagSmi(Register smi) {
  STATIC_ASSERT(kXRegSizeInBits == static_cast<unsigned>(kSmiShift +
                                                         kSmiValueSize));
  DCHECK(smi.Is64Bits());
  return Operand(smi, ASR, kSmiShift);
}


Operand Operand::UntagSmiAndScale(Register smi, int scale) {
  STATIC_ASSERT(kXRegSizeInBits == static_cast<unsigned>(kSmiShift +
                                                         kSmiValueSize));
  DCHECK(smi.Is64Bits());
  DCHECK((scale >= 0) && (scale <= (64 - kSmiValueSize)));
  if (scale > kSmiShift) {
    return Operand(smi, LSL, scale - kSmiShift);
  } else if (scale < kSmiShift) {
    return Operand(smi, ASR, kSmiShift - scale);
  }
  return Operand(smi);
}
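
// Worked example (editor's note, not part of the original source): with
// 64-bit smis, kSmiShift == 32 and kSmiValueSize == 32, so a tagged smi
// keeps its value in the upper 32 bits. Untagging is then "ASR #32", and
// untagging followed by scaling by 8 (scale == 3 < kSmiShift) folds both
// steps into the single shift "ASR #29".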


MemOperand::MemOperand()
  : base_(NoReg), regoffset_(NoReg), offset_(0), addrmode_(Offset),
    shift_(NO_SHIFT), extend_(NO_EXTEND), shift_amount_(0) {
}


MemOperand::MemOperand(Register base, int64_t offset, AddrMode addrmode)
  : base_(base), regoffset_(NoReg), offset_(offset), addrmode_(addrmode),
    shift_(NO_SHIFT), extend_(NO_EXTEND), shift_amount_(0) {
  DCHECK(base.Is64Bits() && !base.IsZero());
}


MemOperand::MemOperand(Register base,
                       Register regoffset,
                       Extend extend,
                       unsigned shift_amount)
  : base_(base), regoffset_(regoffset), offset_(0), addrmode_(Offset),
    shift_(NO_SHIFT), extend_(extend), shift_amount_(shift_amount) {
  DCHECK(base.Is64Bits() && !base.IsZero());
  DCHECK(!regoffset.IsSP());
  DCHECK((extend == UXTW) || (extend == SXTW) || (extend == SXTX));

  // SXTX extend mode requires a 64-bit offset register.
  DCHECK(regoffset.Is64Bits() || (extend != SXTX));
}


MemOperand::MemOperand(Register base,
                       Register regoffset,
                       Shift shift,
                       unsigned shift_amount)
  : base_(base), regoffset_(regoffset), offset_(0), addrmode_(Offset),
    shift_(shift), extend_(NO_EXTEND), shift_amount_(shift_amount) {
  DCHECK(base.Is64Bits() && !base.IsZero());
  DCHECK(regoffset.Is64Bits() && !regoffset.IsSP());
  DCHECK(shift == LSL);
}


MemOperand::MemOperand(Register base, const Operand& offset, AddrMode addrmode)
  : base_(base), addrmode_(addrmode) {
  DCHECK(base.Is64Bits() && !base.IsZero());

  if (offset.IsImmediate()) {
    offset_ = offset.ImmediateValue();

    regoffset_ = NoReg;
  } else if (offset.IsShiftedRegister()) {
    DCHECK(addrmode == Offset);

    regoffset_ = offset.reg();
    shift_ = offset.shift();
    shift_amount_ = offset.shift_amount();

    extend_ = NO_EXTEND;
    offset_ = 0;

    // These assertions match those in the shifted-register constructor.
    DCHECK(regoffset_.Is64Bits() && !regoffset_.IsSP());
    DCHECK(shift_ == LSL);
  } else {
    DCHECK(offset.IsExtendedRegister());
    DCHECK(addrmode == Offset);

    regoffset_ = offset.reg();
    extend_ = offset.extend();
    shift_amount_ = offset.shift_amount();

    shift_ = NO_SHIFT;
    offset_ = 0;

    // These assertions match those in the extended-register constructor.
    DCHECK(!regoffset_.IsSP());
    DCHECK((extend_ == UXTW) || (extend_ == SXTW) || (extend_ == SXTX));
    DCHECK((regoffset_.Is64Bits() || (extend_ != SXTX)));
  }
}
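
// Editor's note (not part of the original source): the addressing modes
// built by these constructors correspond to, e.g.:
//   MemOperand(x0, 8)               // [x0, #8]          immediate offset
//   MemOperand(x0, x1, LSL, 3)      // [x0, x1, lsl #3]  register offset
//   MemOperand(x0, 16, PreIndex)    // [x0, #16]!        pre-indexed
//   MemOperand(x0, 16, PostIndex)   // [x0], #16         post-indexed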

bool MemOperand::IsImmediateOffset() const {
  return (addrmode_ == Offset) && regoffset_.Is(NoReg);
}


bool MemOperand::IsRegisterOffset() const {
  return (addrmode_ == Offset) && !regoffset_.Is(NoReg);
}


bool MemOperand::IsPreIndex() const {
  return addrmode_ == PreIndex;
}


bool MemOperand::IsPostIndex() const {
  return addrmode_ == PostIndex;
}


Operand MemOperand::OffsetAsOperand() const {
  if (IsImmediateOffset()) {
    return offset();
  } else {
    DCHECK(IsRegisterOffset());
    if (extend() == NO_EXTEND) {
      return Operand(regoffset(), shift(), shift_amount());
    } else {
      return Operand(regoffset(), extend(), shift_amount());
    }
  }
}


void Assembler::Unreachable() {
#ifdef USE_SIMULATOR
  debug("UNREACHABLE", __LINE__, BREAK);
#else
  // Crash by branching to 0. lr now points near the fault.
  Emit(BLR | Rn(xzr));
#endif
}


Address Assembler::target_pointer_address_at(Address pc) {
  Instruction* instr = reinterpret_cast<Instruction*>(pc);
  DCHECK(instr->IsLdrLiteralX());
  return reinterpret_cast<Address>(instr->ImmPCOffsetTarget());
}


// Read/Modify the code target address in the branch/call instruction at pc.
Address Assembler::target_address_at(Address pc,
                                     ConstantPoolArray* constant_pool) {
  return Memory::Address_at(target_pointer_address_at(pc));
}


Address Assembler::target_address_at(Address pc, Code* code) {
  ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
  return target_address_at(pc, constant_pool);
}


Address Assembler::target_address_from_return_address(Address pc) {
  // Returns the address of the call target from the return address that will
  // be returned to after a call.
  // Call sequence on ARM64 is:
  //  ldr ip0, #... @ load from literal pool
  //  blr ip0
  Address candidate = pc - 2 * kInstructionSize;
  Instruction* instr = reinterpret_cast<Instruction*>(candidate);
  USE(instr);
  DCHECK(instr->IsLdrLiteralX());
  return candidate;
}


Address Assembler::break_address_from_return_address(Address pc) {
  return pc - Assembler::kPatchDebugBreakSlotReturnOffset;
}


Address Assembler::return_address_from_call_start(Address pc) {
  // The call, generated by MacroAssembler::Call, is one of two possible
  // sequences:
  //
  // Without relocation:
  //  movz  temp, #(target & 0x000000000000ffff)
  //  movk  temp, #(target & 0x00000000ffff0000)
  //  movk  temp, #(target & 0x0000ffff00000000)
  //  blr   temp
  //
  // With relocation:
  //  ldr   temp, =target
  //  blr   temp
  //
  // The return address is immediately after the blr instruction in both cases,
  // so it can be found by adding the call size to the address at the start of
  // the call sequence.
  STATIC_ASSERT(Assembler::kCallSizeWithoutRelocation == 4 * kInstructionSize);
  STATIC_ASSERT(Assembler::kCallSizeWithRelocation == 2 * kInstructionSize);

  Instruction* instr = reinterpret_cast<Instruction*>(pc);
  if (instr->IsMovz()) {
    // Verify the instruction sequence.
    DCHECK(instr->following(1)->IsMovk());
    DCHECK(instr->following(2)->IsMovk());
    DCHECK(instr->following(3)->IsBranchAndLinkToRegister());
    return pc + Assembler::kCallSizeWithoutRelocation;
  } else {
    // Verify the instruction sequence.
    DCHECK(instr->IsLdrLiteralX());
    DCHECK(instr->following(1)->IsBranchAndLinkToRegister());
    return pc + Assembler::kCallSizeWithRelocation;
  }
}


void Assembler::deserialization_set_special_target_at(
    Address constant_pool_entry, Code* code, Address target) {
  Memory::Address_at(constant_pool_entry) = target;
}


void Assembler::set_target_address_at(Address pc,
                                      ConstantPoolArray* constant_pool,
                                      Address target,
                                      ICacheFlushMode icache_flush_mode) {
  Memory::Address_at(target_pointer_address_at(pc)) = target;
  // Intuitively, we would think it is necessary to always flush the
  // instruction cache after patching a target address in the code as follows:
  //   CpuFeatures::FlushICache(pc, sizeof(target));
  // However, on ARM, an instruction is actually patched in the case of
  // embedded constants of the form:
  // ldr   ip, [pc, #...]
  // since the instruction accessing this address in the constant pool remains
  // unchanged, a flush is not required.
}


void Assembler::set_target_address_at(Address pc,
                                      Code* code,
                                      Address target,
                                      ICacheFlushMode icache_flush_mode) {
  ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
  set_target_address_at(pc, constant_pool, target, icache_flush_mode);
}


int RelocInfo::target_address_size() {
  return kPointerSize;
}


Address RelocInfo::target_address() {
  DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
  return Assembler::target_address_at(pc_, host_);
}


Address RelocInfo::target_address_address() {
  DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
      || rmode_ == EMBEDDED_OBJECT
      || rmode_ == EXTERNAL_REFERENCE);
  return Assembler::target_pointer_address_at(pc_);
}


Address RelocInfo::constant_pool_entry_address() {
  DCHECK(IsInConstantPool());
  return Assembler::target_pointer_address_at(pc_);
}


Object* RelocInfo::target_object() {
  DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
  return reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_));
}


Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
  DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
  return Handle<Object>(reinterpret_cast<Object**>(
      Assembler::target_address_at(pc_, host_)));
}


void RelocInfo::set_target_object(Object* target,
                                  WriteBarrierMode write_barrier_mode,
                                  ICacheFlushMode icache_flush_mode) {
  DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
  Assembler::set_target_address_at(pc_, host_,
                                   reinterpret_cast<Address>(target),
                                   icache_flush_mode);
  if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
      host() != NULL &&
      target->IsHeapObject()) {
    host()->GetHeap()->incremental_marking()->RecordWrite(
        host(), &Memory::Object_at(pc_), HeapObject::cast(target));
  }
}


Address RelocInfo::target_reference() {
  DCHECK(rmode_ == EXTERNAL_REFERENCE);
  return Assembler::target_address_at(pc_, host_);
}


Address RelocInfo::target_runtime_entry(Assembler* origin) {
  DCHECK(IsRuntimeEntry(rmode_));
  return target_address();
}


void RelocInfo::set_target_runtime_entry(Address target,
                                         WriteBarrierMode write_barrier_mode,
                                         ICacheFlushMode icache_flush_mode) {
  DCHECK(IsRuntimeEntry(rmode_));
  if (target_address() != target) {
    set_target_address(target, write_barrier_mode, icache_flush_mode);
  }
}


Handle<Cell> RelocInfo::target_cell_handle() {
  UNIMPLEMENTED();
  Cell *null_cell = NULL;
  return Handle<Cell>(null_cell);
}


Cell* RelocInfo::target_cell() {
  DCHECK(rmode_ == RelocInfo::CELL);
  return Cell::FromValueAddress(Memory::Address_at(pc_));
}


void RelocInfo::set_target_cell(Cell* cell,
                                WriteBarrierMode write_barrier_mode,
                                ICacheFlushMode icache_flush_mode) {
  UNIMPLEMENTED();
}


static const int kNoCodeAgeSequenceLength = 5 * kInstructionSize;
static const int kCodeAgeStubEntryOffset = 3 * kInstructionSize;


Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
  UNREACHABLE();  // This should never be reached on ARM64.
  return Handle<Object>();
}


Code* RelocInfo::code_age_stub() {
  DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
  // Read the stub entry point from the code age sequence.
  Address stub_entry_address = pc_ + kCodeAgeStubEntryOffset;
  return Code::GetCodeFromTargetAddress(Memory::Address_at(stub_entry_address));
}


void RelocInfo::set_code_age_stub(Code* stub,
                                  ICacheFlushMode icache_flush_mode) {
  DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
  DCHECK(!Code::IsYoungSequence(stub->GetIsolate(), pc_));
  // Overwrite the stub entry point in the code age sequence. This is loaded as
  // a literal so there is no need to call FlushICache here.
  Address stub_entry_address = pc_ + kCodeAgeStubEntryOffset;
  Memory::Address_at(stub_entry_address) = stub->instruction_start();
}


Address RelocInfo::call_address() {
  DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
         (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
  // For the above sequences the Relocinfo points to the load literal loading
  // the call address.
  return Assembler::target_address_at(pc_, host_);
}


void RelocInfo::set_call_address(Address target) {
  DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
         (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
  Assembler::set_target_address_at(pc_, host_, target);
  if (host() != NULL) {
    Object* target_code = Code::GetCodeFromTargetAddress(target);
    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
        host(), this, HeapObject::cast(target_code));
  }
}


void RelocInfo::WipeOut() {
  DCHECK(IsEmbeddedObject(rmode_) ||
         IsCodeTarget(rmode_) ||
         IsRuntimeEntry(rmode_) ||
         IsExternalReference(rmode_));
  Assembler::set_target_address_at(pc_, host_, NULL);
}


bool RelocInfo::IsPatchedReturnSequence() {
  // The sequence must be:
  //   ldr ip0, [pc, #offset]
  //   blr ip0
  // See arm64/debug-arm64.cc BreakLocationIterator::SetDebugBreakAtReturn().
  Instruction* i1 = reinterpret_cast<Instruction*>(pc_);
  Instruction* i2 = i1->following();
  return i1->IsLdrLiteralX() && (i1->Rt() == ip0.code()) &&
         i2->IsBranchAndLinkToRegister() && (i2->Rn() == ip0.code());
}


bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
  Instruction* current_instr = reinterpret_cast<Instruction*>(pc_);
  return !current_instr->IsNop(Assembler::DEBUG_BREAK_NOP);
}


void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
  RelocInfo::Mode mode = rmode();
  if (mode == RelocInfo::EMBEDDED_OBJECT) {
    visitor->VisitEmbeddedPointer(this);
  } else if (RelocInfo::IsCodeTarget(mode)) {
    visitor->VisitCodeTarget(this);
  } else if (mode == RelocInfo::CELL) {
    visitor->VisitCell(this);
  } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
    visitor->VisitExternalReference(this);
  } else if (((RelocInfo::IsJSReturn(mode) &&
              IsPatchedReturnSequence()) ||
             (RelocInfo::IsDebugBreakSlot(mode) &&
              IsPatchedDebugBreakSlotSequence())) &&
             isolate->debug()->has_break_points()) {
    visitor->VisitDebugTarget(this);
  } else if (RelocInfo::IsRuntimeEntry(mode)) {
    visitor->VisitRuntimeEntry(this);
  }
}


template<typename StaticVisitor>
void RelocInfo::Visit(Heap* heap) {
  RelocInfo::Mode mode = rmode();
  if (mode == RelocInfo::EMBEDDED_OBJECT) {
    StaticVisitor::VisitEmbeddedPointer(heap, this);
  } else if (RelocInfo::IsCodeTarget(mode)) {
    StaticVisitor::VisitCodeTarget(heap, this);
  } else if (mode == RelocInfo::CELL) {
    StaticVisitor::VisitCell(heap, this);
  } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
    StaticVisitor::VisitExternalReference(this);
  } else if (heap->isolate()->debug()->has_break_points() &&
             ((RelocInfo::IsJSReturn(mode) &&
              IsPatchedReturnSequence()) ||
             (RelocInfo::IsDebugBreakSlot(mode) &&
              IsPatchedDebugBreakSlotSequence()))) {
    StaticVisitor::VisitDebugTarget(heap, this);
  } else if (RelocInfo::IsRuntimeEntry(mode)) {
    StaticVisitor::VisitRuntimeEntry(this);
  }
}


LoadStoreOp Assembler::LoadOpFor(const CPURegister& rt) {
  DCHECK(rt.IsValid());
  if (rt.IsRegister()) {
    return rt.Is64Bits() ? LDR_x : LDR_w;
  } else {
    DCHECK(rt.IsFPRegister());
    return rt.Is64Bits() ? LDR_d : LDR_s;
  }
}


LoadStorePairOp Assembler::LoadPairOpFor(const CPURegister& rt,
                                         const CPURegister& rt2) {
  DCHECK(AreSameSizeAndType(rt, rt2));
  USE(rt2);
  if (rt.IsRegister()) {
    return rt.Is64Bits() ? LDP_x : LDP_w;
  } else {
    DCHECK(rt.IsFPRegister());
    return rt.Is64Bits() ? LDP_d : LDP_s;
  }
}


LoadStoreOp Assembler::StoreOpFor(const CPURegister& rt) {
  DCHECK(rt.IsValid());
  if (rt.IsRegister()) {
    return rt.Is64Bits() ? STR_x : STR_w;
  } else {
    DCHECK(rt.IsFPRegister());
    return rt.Is64Bits() ? STR_d : STR_s;
  }
}


LoadStorePairOp Assembler::StorePairOpFor(const CPURegister& rt,
                                          const CPURegister& rt2) {
  DCHECK(AreSameSizeAndType(rt, rt2));
  USE(rt2);
  if (rt.IsRegister()) {
    return rt.Is64Bits() ? STP_x : STP_w;
  } else {
    DCHECK(rt.IsFPRegister());
    return rt.Is64Bits() ? STP_d : STP_s;
  }
}


LoadStorePairNonTemporalOp Assembler::LoadPairNonTemporalOpFor(
    const CPURegister& rt, const CPURegister& rt2) {
  DCHECK(AreSameSizeAndType(rt, rt2));
  USE(rt2);
  if (rt.IsRegister()) {
    return rt.Is64Bits() ? LDNP_x : LDNP_w;
  } else {
    DCHECK(rt.IsFPRegister());
    return rt.Is64Bits() ? LDNP_d : LDNP_s;
  }
}


LoadStorePairNonTemporalOp Assembler::StorePairNonTemporalOpFor(
    const CPURegister& rt, const CPURegister& rt2) {
  DCHECK(AreSameSizeAndType(rt, rt2));
  USE(rt2);
  if (rt.IsRegister()) {
    return rt.Is64Bits() ? STNP_x : STNP_w;
  } else {
    DCHECK(rt.IsFPRegister());
    return rt.Is64Bits() ? STNP_d : STNP_s;
  }
}


LoadLiteralOp Assembler::LoadLiteralOpFor(const CPURegister& rt) {
  if (rt.IsRegister()) {
    return rt.Is64Bits() ? LDR_x_lit : LDR_w_lit;
  } else {
    DCHECK(rt.IsFPRegister());
    return rt.Is64Bits() ? LDR_d_lit : LDR_s_lit;
  }
}


int Assembler::LinkAndGetInstructionOffsetTo(Label* label) {
  DCHECK(kStartOfLabelLinkChain == 0);
  int offset = LinkAndGetByteOffsetTo(label);
  DCHECK(IsAligned(offset, kInstructionSize));
  return offset >> kInstructionSizeLog2;
}


Instr Assembler::Flags(FlagsUpdate S) {
  if (S == SetFlags) {
    return 1 << FlagsUpdate_offset;
  } else if (S == LeaveFlags) {
    return 0 << FlagsUpdate_offset;
  }
  UNREACHABLE();
  return 0;
}


Instr Assembler::Cond(Condition cond) {
  return cond << Condition_offset;
}


Instr Assembler::ImmPCRelAddress(int imm21) {
  CHECK(is_int21(imm21));
  Instr imm = static_cast<Instr>(truncate_to_int21(imm21));
  Instr immhi = (imm >> ImmPCRelLo_width) << ImmPCRelHi_offset;
  Instr immlo = imm << ImmPCRelLo_offset;
  return (immhi & ImmPCRelHi_mask) | (immlo & ImmPCRelLo_mask);
}
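
// Editor's note (not part of the original source): ADR splits its 21-bit
// PC-relative immediate into a 2-bit low field (immlo) and a 19-bit high
// field (immhi); the shifts and masks above place each field into its
// instruction slot.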


Instr Assembler::ImmUncondBranch(int imm26) {
  CHECK(is_int26(imm26));
  return truncate_to_int26(imm26) << ImmUncondBranch_offset;
}


Instr Assembler::ImmCondBranch(int imm19) {
  CHECK(is_int19(imm19));
  return truncate_to_int19(imm19) << ImmCondBranch_offset;
}


Instr Assembler::ImmCmpBranch(int imm19) {
  CHECK(is_int19(imm19));
  return truncate_to_int19(imm19) << ImmCmpBranch_offset;
}


Instr Assembler::ImmTestBranch(int imm14) {
  CHECK(is_int14(imm14));
  return truncate_to_int14(imm14) << ImmTestBranch_offset;
}


Instr Assembler::ImmTestBranchBit(unsigned bit_pos) {
  DCHECK(is_uint6(bit_pos));
  // Subtract five from the shift offset, as we need bit 5 from bit_pos.
  unsigned b5 = bit_pos << (ImmTestBranchBit5_offset - 5);
  unsigned b40 = bit_pos << ImmTestBranchBit40_offset;
  b5 &= ImmTestBranchBit5_mask;
  b40 &= ImmTestBranchBit40_mask;
  return b5 | b40;
}
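
// Editor's note (not part of the original source): TBZ/TBNZ encode the
// tested bit position in two fields, b5 (bit 5 of bit_pos, which also
// selects a W or X register view) and b40 (bits 4:0); e.g. bit_pos == 33
// yields b5 == 1 and b40 == 1.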


Instr Assembler::SF(Register rd) {
  return rd.Is64Bits() ? SixtyFourBits : ThirtyTwoBits;
}


Instr Assembler::ImmAddSub(int64_t imm) {
  DCHECK(IsImmAddSub(imm));
  if (is_uint12(imm)) {  // No shift required.
    return imm << ImmAddSub_offset;
  } else {
    return ((imm >> 12) << ImmAddSub_offset) | (1 << ShiftAddSub_offset);
  }
}
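
// Worked example (editor's note, not part of the original source): an
// add/sub immediate is a 12-bit value optionally shifted left by 12, so
// 0x123 encodes directly, while 0x123000 encodes as 0x123 with the
// ShiftAddSub bit set. IsImmAddSub() accepts exactly these two shapes.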


Instr Assembler::ImmS(unsigned imms, unsigned reg_size) {
  DCHECK(((reg_size == kXRegSizeInBits) && is_uint6(imms)) ||
         ((reg_size == kWRegSizeInBits) && is_uint5(imms)));
  USE(reg_size);
  return imms << ImmS_offset;
}


Instr Assembler::ImmR(unsigned immr, unsigned reg_size) {
  DCHECK(((reg_size == kXRegSizeInBits) && is_uint6(immr)) ||
         ((reg_size == kWRegSizeInBits) && is_uint5(immr)));
  USE(reg_size);
  DCHECK(is_uint6(immr));
  return immr << ImmR_offset;
}


Instr Assembler::ImmSetBits(unsigned imms, unsigned reg_size) {
  DCHECK((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
  DCHECK(is_uint6(imms));
  DCHECK((reg_size == kXRegSizeInBits) || is_uint6(imms + 3));
  USE(reg_size);
  return imms << ImmSetBits_offset;
}


Instr Assembler::ImmRotate(unsigned immr, unsigned reg_size) {
  DCHECK((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
  DCHECK(((reg_size == kXRegSizeInBits) && is_uint6(immr)) ||
         ((reg_size == kWRegSizeInBits) && is_uint5(immr)));
  USE(reg_size);
  return immr << ImmRotate_offset;
}


Instr Assembler::ImmLLiteral(int imm19) {
  CHECK(is_int19(imm19));
  return truncate_to_int19(imm19) << ImmLLiteral_offset;
}


Instr Assembler::BitN(unsigned bitn, unsigned reg_size) {
  DCHECK((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
  DCHECK((reg_size == kXRegSizeInBits) || (bitn == 0));
  USE(reg_size);
  return bitn << BitN_offset;
}


Instr Assembler::ShiftDP(Shift shift) {
  DCHECK(shift == LSL || shift == LSR || shift == ASR || shift == ROR);
  return shift << ShiftDP_offset;
}


Instr Assembler::ImmDPShift(unsigned amount) {
  DCHECK(is_uint6(amount));
  return amount << ImmDPShift_offset;
}


Instr Assembler::ExtendMode(Extend extend) {
  return extend << ExtendMode_offset;
}


Instr Assembler::ImmExtendShift(unsigned left_shift) {
  DCHECK(left_shift <= 4);
  return left_shift << ImmExtendShift_offset;
}


Instr Assembler::ImmCondCmp(unsigned imm) {
  DCHECK(is_uint5(imm));
  return imm << ImmCondCmp_offset;
}


Instr Assembler::Nzcv(StatusFlags nzcv) {
  return ((nzcv >> Flags_offset) & 0xf) << Nzcv_offset;
}


Instr Assembler::ImmLSUnsigned(int imm12) {
  DCHECK(is_uint12(imm12));
  return imm12 << ImmLSUnsigned_offset;
}


Instr Assembler::ImmLS(int imm9) {
  DCHECK(is_int9(imm9));
  return truncate_to_int9(imm9) << ImmLS_offset;
}


Instr Assembler::ImmLSPair(int imm7, LSDataSize size) {
  DCHECK(((imm7 >> size) << size) == imm7);
  int scaled_imm7 = imm7 >> size;
  DCHECK(is_int7(scaled_imm7));
  return truncate_to_int7(scaled_imm7) << ImmLSPair_offset;
}
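
// Worked example (editor's note, not part of the original source): LDP/STP
// offsets are scaled by the access size, so for a pair of X registers
// (size == 3) a byte offset of 32 is encoded as the 7-bit field
// 32 >> 3 == 4.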


Instr Assembler::ImmShiftLS(unsigned shift_amount) {
  DCHECK(is_uint1(shift_amount));
  return shift_amount << ImmShiftLS_offset;
}


Instr Assembler::ImmException(int imm16) {
  DCHECK(is_uint16(imm16));
  return imm16 << ImmException_offset;
}


Instr Assembler::ImmSystemRegister(int imm15) {
  DCHECK(is_uint15(imm15));
  return imm15 << ImmSystemRegister_offset;
}


Instr Assembler::ImmHint(int imm7) {
  DCHECK(is_uint7(imm7));
  return imm7 << ImmHint_offset;
}


Instr Assembler::ImmBarrierDomain(int imm2) {
  DCHECK(is_uint2(imm2));
  return imm2 << ImmBarrierDomain_offset;
}


Instr Assembler::ImmBarrierType(int imm2) {
  DCHECK(is_uint2(imm2));
  return imm2 << ImmBarrierType_offset;
}


LSDataSize Assembler::CalcLSDataSize(LoadStoreOp op) {
  DCHECK((SizeLS_offset + SizeLS_width) == (kInstructionSize * 8));
  return static_cast<LSDataSize>(op >> SizeLS_offset);
}
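
// Editor's note (not part of the original source): the load/store size field
// sits in the top bits of the opcode (SizeLS_offset + SizeLS_width spans the
// full 32-bit instruction, as asserted above), so shifting the op right by
// SizeLS_offset leaves just the data size.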


Instr Assembler::ImmMoveWide(uint64_t imm) {
  DCHECK(is_uint16(imm));
  return imm << ImmMoveWide_offset;
}


Instr Assembler::ShiftMoveWide(int64_t shift) {
  DCHECK(is_uint2(shift));
  return shift << ShiftMoveWide_offset;
}


Instr Assembler::FPType(FPRegister fd) {
  return fd.Is64Bits() ? FP64 : FP32;
}


Instr Assembler::FPScale(unsigned scale) {
  DCHECK(is_uint6(scale));
  return scale << FPScale_offset;
}


const Register& Assembler::AppropriateZeroRegFor(const CPURegister& reg) const {
  return reg.Is64Bits() ? xzr : wzr;
}


inline void Assembler::CheckBufferSpace() {
  DCHECK(pc_ < (buffer_ + buffer_size_));
  if (buffer_space() < kGap) {
    GrowBuffer();
  }
}


inline void Assembler::CheckBuffer() {
  CheckBufferSpace();
  if (pc_offset() >= next_veneer_pool_check_) {
    CheckVeneerPool(false, true);
  }
  if (pc_offset() >= next_constant_pool_check_) {
    CheckConstPool(false, true);
  }
}
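
// Editor's note (not part of the original source): CheckBuffer() is the
// assembler's periodic safety hook; besides growing the buffer it gives the
// constant pool and the veneer pool a chance to be emitted once pc_offset()
// crosses their next scheduled check.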


TypeFeedbackId Assembler::RecordedAstId() {
  DCHECK(!recorded_ast_id_.IsNone());
  return recorded_ast_id_;
}


void Assembler::ClearRecordedAstId() {
  recorded_ast_id_ = TypeFeedbackId::None();
}


} }  // namespace v8::internal

#endif  // V8_ARM64_ASSEMBLER_ARM64_INL_H_